// Copyright 2013 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/compiler/code-generator.h"
#include "src/assembler-inl.h"
#include "src/callable.h"
#include "src/compilation-info.h"
#include "src/compiler/code-generator-impl.h"
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/frame-constants.h"
#include "src/frames.h"
#include "src/heap/heap-inl.h"
#include "src/ia32/assembler-ia32.h"
#include "src/ia32/macro-assembler-ia32.h"
namespace v8 {
namespace internal {
namespace compiler {
#define __ tasm()->
#define kScratchDoubleReg xmm0
// Adds IA-32 specific methods for decoding operands.
class IA32OperandConverter : public InstructionOperandConverter {
public:
IA32OperandConverter(CodeGenerator* gen, Instruction* instr)
: InstructionOperandConverter(gen, instr) {}
Operand InputOperand(size_t index, int extra = 0) {
return ToOperand(instr_->InputAt(index), extra);
}
Immediate InputImmediate(size_t index) {
return ToImmediate(instr_->InputAt(index));
}
Operand OutputOperand() { return ToOperand(instr_->Output()); }
Operand ToOperand(InstructionOperand* op, int extra = 0) {
if (op->IsRegister()) {
DCHECK_EQ(0, extra);
return Operand(ToRegister(op));
} else if (op->IsFPRegister()) {
DCHECK_EQ(0, extra);
return Operand(ToDoubleRegister(op));
}
DCHECK(op->IsStackSlot() || op->IsFPStackSlot());
return SlotToOperand(AllocatedOperand::cast(op)->index(), extra);
}
Operand SlotToOperand(int slot, int extra = 0) {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return Operand(offset.from_stack_pointer() ? esp : ebp,
offset.offset() + extra);
}
Immediate ToImmediate(InstructionOperand* operand) {
Constant constant = ToConstant(operand);
if (constant.type() == Constant::kInt32 &&
RelocInfo::IsWasmReference(constant.rmode())) {
return Immediate(reinterpret_cast<Address>(constant.ToInt32()),
constant.rmode());
}
switch (constant.type()) {
case Constant::kInt32:
return Immediate(constant.ToInt32());
case Constant::kFloat32:
return Immediate::EmbeddedNumber(constant.ToFloat32());
case Constant::kFloat64:
return Immediate::EmbeddedNumber(constant.ToFloat64().value());
case Constant::kExternalReference:
return Immediate(constant.ToExternalReference());
case Constant::kHeapObject:
return Immediate(constant.ToHeapObject());
case Constant::kInt64:
break;
case Constant::kRpoNumber:
return Immediate::CodeRelativeOffset(ToLabel(operand));
}
UNREACHABLE();
}
static size_t NextOffset(size_t* offset) {
size_t i = *offset;
(*offset)++;
return i;
}
static ScaleFactor ScaleFor(AddressingMode one, AddressingMode mode) {
STATIC_ASSERT(0 == static_cast<int>(times_1));
STATIC_ASSERT(1 == static_cast<int>(times_2));
STATIC_ASSERT(2 == static_cast<int>(times_4));
STATIC_ASSERT(3 == static_cast<int>(times_8));
int scale = static_cast<int>(mode - one);
DCHECK(scale >= 0 && scale < 4);
return static_cast<ScaleFactor>(scale);
}
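// Decodes the addressing mode encoded in the instruction opcode and consumes
// the corresponding inputs, starting at *offset, to build an x86 memory
// operand.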
Operand MemoryOperand(size_t* offset) {
AddressingMode mode = AddressingModeField::decode(instr_->opcode());
switch (mode) {
case kMode_MR: {
Register base = InputRegister(NextOffset(offset));
int32_t disp = 0;
return Operand(base, disp);
}
case kMode_MRI: {
Register base = InputRegister(NextOffset(offset));
Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
return Operand(base, ctant.ToInt32(), ctant.rmode());
}
case kMode_MR1:
case kMode_MR2:
case kMode_MR4:
case kMode_MR8: {
Register base = InputRegister(NextOffset(offset));
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_MR1, mode);
int32_t disp = 0;
return Operand(base, index, scale, disp);
}
case kMode_MR1I:
case kMode_MR2I:
case kMode_MR4I:
case kMode_MR8I: {
Register base = InputRegister(NextOffset(offset));
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_MR1I, mode);
Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
return Operand(base, index, scale, ctant.ToInt32(), ctant.rmode());
}
case kMode_M1:
case kMode_M2:
case kMode_M4:
case kMode_M8: {
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_M1, mode);
int32_t disp = 0;
return Operand(index, scale, disp);
}
case kMode_M1I:
case kMode_M2I:
case kMode_M4I:
case kMode_M8I: {
Register index = InputRegister(NextOffset(offset));
ScaleFactor scale = ScaleFor(kMode_M1I, mode);
Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
return Operand(index, scale, ctant.ToInt32(), ctant.rmode());
}
case kMode_MI: {
Constant ctant = ToConstant(instr_->InputAt(NextOffset(offset)));
return Operand(ctant.ToInt32(), ctant.rmode());
}
case kMode_None:
UNREACHABLE();
}
UNREACHABLE();
}
Operand MemoryOperand(size_t first_input = 0) {
return MemoryOperand(&first_input);
}
};
namespace {
bool HasImmediateInput(Instruction* instr, size_t index) {
return instr->InputAt(index)->IsImmediate();
}
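// Materializes a quiet NaN in |result| by computing 0.0f / 0.0f; used as the
// out-of-line path of the float min/max sequences below when an input is NaN.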
class OutOfLineLoadFloat32NaN final : public OutOfLineCode {
public:
OutOfLineLoadFloat32NaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
__ xorps(result_, result_);
__ divss(result_, result_);
}
private:
XMMRegister const result_;
};
class OutOfLineLoadFloat64NaN final : public OutOfLineCode {
public:
OutOfLineLoadFloat64NaN(CodeGenerator* gen, XMMRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
__ xorpd(result_, result_);
__ divsd(result_, result_);
}
private:
XMMRegister const result_;
};
class OutOfLineTruncateDoubleToI final : public OutOfLineCode {
public:
OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
XMMRegister input)
: OutOfLineCode(gen),
result_(result),
input_(input),
zone_(gen->zone()) {}
void Generate() final {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(MemOperand(esp, 0), input_);
__ SlowTruncateToIDelayed(zone_, result_);
__ add(esp, Immediate(kDoubleSize));
}
private:
Register const result_;
XMMRegister const input_;
Zone* zone_;
};
class OutOfLineRecordWrite final : public OutOfLineCode {
public:
OutOfLineRecordWrite(CodeGenerator* gen, Register object, Operand operand,
Register value, Register scratch0, Register scratch1,
RecordWriteMode mode)
: OutOfLineCode(gen),
object_(object),
operand_(operand),
value_(value),
scratch0_(scratch0),
scratch1_(scratch1),
mode_(mode),
zone_(gen->zone()) {}
void SaveRegisters(RegList registers) {
DCHECK_LT(0, NumRegs(registers));
for (int i = 0; i < Register::kNumRegisters; ++i) {
if ((registers >> i) & 1u) {
__ push(Register::from_code(i));
}
}
}
void RestoreRegisters(RegList registers) {
DCHECK_LT(0, NumRegs(registers));
for (int i = Register::kNumRegisters - 1; i >= 0; --i) {
if ((registers >> i) & 1u) {
__ pop(Register::from_code(i));
}
}
}
void Generate() final {
if (mode_ > RecordWriteMode::kValueIsPointer) {
__ JumpIfSmi(value_, exit());
}
__ CheckPageFlag(value_, scratch0_,
MemoryChunk::kPointersToHereAreInterestingMask, zero,
exit());
__ lea(scratch1_, operand_);
RememberedSetAction const remembered_set_action =
mode_ > RecordWriteMode::kValueIsMap ? EMIT_REMEMBERED_SET
: OMIT_REMEMBERED_SET;
SaveFPRegsMode const save_fp_mode =
frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
__ CallRecordWriteStub(object_, scratch1_, remembered_set_action,
save_fp_mode);
}
private:
Register const object_;
Operand const operand_;
Register const value_;
Register const scratch0_;
Register const scratch1_;
RecordWriteMode const mode_;
Zone* zone_;
};
} // namespace
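// Emits a compare (or test) where the left operand may come from memory when
// an addressing mode is encoded in the opcode, and the right operand may be an
// immediate, a register, or a memory operand.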
#define ASSEMBLE_COMPARE(asm_instr) \
do { \
if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
size_t index = 0; \
Operand left = i.MemoryOperand(&index); \
if (HasImmediateInput(instr, index)) { \
__ asm_instr(left, i.InputImmediate(index)); \
} else { \
__ asm_instr(left, i.InputRegister(index)); \
} \
} else { \
if (HasImmediateInput(instr, 1)) { \
if (instr->InputAt(0)->IsRegister()) { \
__ asm_instr(i.InputRegister(0), i.InputImmediate(1)); \
} else { \
__ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
} \
} else { \
if (instr->InputAt(1)->IsRegister()) { \
__ asm_instr(i.InputRegister(0), i.InputRegister(1)); \
} else { \
__ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
} \
} \
} \
} while (0)
#define ASSEMBLE_IEEE754_BINOP(name) \
do { \
/* Pass two doubles as arguments on the stack. */ \
__ PrepareCallCFunction(4, eax); \
__ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
__ movsd(Operand(esp, 1 * kDoubleSize), i.InputDoubleRegister(1)); \
__ CallCFunction( \
ExternalReference::ieee754_##name##_function(__ isolate()), 4); \
/* Return value is in st(0) on ia32. */ \
/* Store it into the result register. */ \
__ sub(esp, Immediate(kDoubleSize)); \
__ fstp_d(Operand(esp, 0)); \
__ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
__ add(esp, Immediate(kDoubleSize)); \
} while (false)
#define ASSEMBLE_IEEE754_UNOP(name) \
do { \
/* Pass one double as argument on the stack. */ \
__ PrepareCallCFunction(2, eax); \
__ movsd(Operand(esp, 0 * kDoubleSize), i.InputDoubleRegister(0)); \
__ CallCFunction( \
ExternalReference::ieee754_##name##_function(__ isolate()), 2); \
/* Return value is in st(0) on ia32. */ \
/* Store it into the result register. */ \
__ sub(esp, Immediate(kDoubleSize)); \
__ fstp_d(Operand(esp, 0)); \
__ movsd(i.OutputDoubleRegister(), Operand(esp, 0)); \
__ add(esp, Immediate(kDoubleSize)); \
} while (false)
#define ASSEMBLE_BINOP(asm_instr) \
do { \
if (AddressingModeField::decode(instr->opcode()) != kMode_None) { \
size_t index = 1; \
Operand right = i.MemoryOperand(&index); \
__ asm_instr(i.InputRegister(0), right); \
} else { \
if (HasImmediateInput(instr, 1)) { \
__ asm_instr(i.InputOperand(0), i.InputImmediate(1)); \
} else { \
__ asm_instr(i.InputRegister(0), i.InputOperand(1)); \
} \
} \
} while (0)
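// Emits a compare-and-swap loop: load the old value into eax, compute the new
// value in a temp register, and retry the lock cmpxchg until no other writer
// has modified the memory location in between.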
#define ASSEMBLE_ATOMIC_BINOP(bin_inst, mov_inst, cmpxchg_inst) \
do { \
Label binop; \
__ bind(&binop); \
__ mov_inst(eax, i.MemoryOperand(1)); \
__ Move(i.TempRegister(0), eax); \
__ bin_inst(i.TempRegister(0), i.InputRegister(0)); \
__ lock(); \
__ cmpxchg_inst(i.MemoryOperand(1), i.TempRegister(0)); \
__ j(not_equal, &binop); \
} while (false)
void CodeGenerator::AssembleDeconstructFrame() {
__ mov(esp, ebp);
__ pop(ebp);
}
void CodeGenerator::AssemblePrepareTailCall() {
if (frame_access_state()->has_frame()) {
__ mov(ebp, MemOperand(ebp, 0));
}
frame_access_state()->SetFrameAccessToSP();
}
void CodeGenerator::AssemblePopArgumentsAdaptorFrame(Register args_reg,
Register, Register,
Register) {
// There are not enough temp registers left on ia32 for a call instruction,
// so we pick some scratch registers and save/restore them manually here.
int scratch_count = 3;
Register scratch1 = ebx;
Register scratch2 = ecx;
Register scratch3 = edx;
DCHECK(!AreAliased(args_reg, scratch1, scratch2, scratch3));
Label done;
// Check if current frame is an arguments adaptor frame.
__ cmp(Operand(ebp, StandardFrameConstants::kContextOffset),
Immediate(StackFrame::TypeToMarker(StackFrame::ARGUMENTS_ADAPTOR)));
__ j(not_equal, &done, Label::kNear);
__ push(scratch1);
__ push(scratch2);
__ push(scratch3);
// Load the arguments count from the current arguments adaptor frame (note
// that it does not include the receiver).
Register caller_args_count_reg = scratch1;
__ mov(caller_args_count_reg,
Operand(ebp, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ SmiUntag(caller_args_count_reg);
ParameterCount callee_args_count(args_reg);
__ PrepareForTailCall(callee_args_count, caller_args_count_reg, scratch2,
scratch3, scratch_count);
__ pop(scratch3);
__ pop(scratch2);
__ pop(scratch1);
__ bind(&done);
}
namespace {
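// Adjusts esp (and the recorded SP delta) so that the first unused slot ends
// up at |new_slot_above_sp|; the stack is only shrunk when |allow_shrinkage|
// is set.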
void AdjustStackPointerForTailCall(TurboAssembler* tasm,
FrameAccessState* state,
int new_slot_above_sp,
bool allow_shrinkage = true) {
int current_sp_offset = state->GetSPToFPSlotCount() +
StandardFrameConstants::kFixedSlotCountAboveFp;
int stack_slot_delta = new_slot_above_sp - current_sp_offset;
if (stack_slot_delta > 0) {
tasm->sub(esp, Immediate(stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
} else if (allow_shrinkage && stack_slot_delta < 0) {
tasm->add(esp, Immediate(-stack_slot_delta * kPointerSize));
state->IncreaseSPDelta(stack_slot_delta);
}
}
} // namespace
void CodeGenerator::AssembleTailCallBeforeGap(Instruction* instr,
int first_unused_stack_slot) {
CodeGenerator::PushTypeFlags flags(kImmediatePush | kScalarPush);
ZoneVector<MoveOperands*> pushes(zone());
GetPushCompatibleMoves(instr, flags, &pushes);
if (!pushes.empty() &&
(LocationOperand::cast(pushes.back()->destination()).index() + 1 ==
first_unused_stack_slot)) {
IA32OperandConverter g(this, instr);
for (auto move : pushes) {
LocationOperand destination_location(
LocationOperand::cast(move->destination()));
InstructionOperand source(move->source());
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
destination_location.index());
if (source.IsStackSlot()) {
LocationOperand source_location(LocationOperand::cast(source));
__ push(g.SlotToOperand(source_location.index()));
} else if (source.IsRegister()) {
LocationOperand source_location(LocationOperand::cast(source));
__ push(source_location.GetRegister());
} else if (source.IsImmediate()) {
__ push(Immediate(ImmediateOperand::cast(source).inline_value()));
} else {
// Pushes of non-scalar data types are not supported.
UNIMPLEMENTED();
}
frame_access_state()->IncreaseSPDelta(1);
move->Eliminate();
}
}
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot, false);
}
void CodeGenerator::AssembleTailCallAfterGap(Instruction* instr,
int first_unused_stack_slot) {
AdjustStackPointerForTailCall(tasm(), frame_access_state(),
first_unused_stack_slot);
}
// Check if the code object is marked for deoptimization. If it is, then it
// jumps to the CompileLazyDeoptimizedCode builtin. In order to do this we
// need to:
//    1. load the address of the current instruction;
//    2. read from memory the word that contains the marked-for-deoptimization
//       bit, which can be found in the flags of the referenced
//       {CodeDataContainer} object;
//    3. test kMarkedForDeoptimizationBit in those flags; and
//    4. jump to the builtin if the bit is not zero.
void CodeGenerator::BailoutIfDeoptimized() {
Label current;
__ call(&current);
int pc = __ pc_offset();
__ bind(&current);
// In order to get the address of the current instruction, we first need
// to use a call and then use a pop, thus pushing the return address to
// the stack and then popping it into the register.
__ pop(ecx);
int offset = Code::kCodeDataContainerOffset - (Code::kHeaderSize + pc);
__ mov(ecx, Operand(ecx, offset));
__ test(FieldOperand(ecx, CodeDataContainer::kKindSpecificFlagsOffset),
Immediate(1 << Code::kMarkedForDeoptimizationBit));
Handle<Code> code = isolate()->builtins()->builtin_handle(
Builtins::kCompileLazyDeoptimizedCode);
__ j(not_zero, code, RelocInfo::CODE_TARGET);
}
// Assembles an instruction after register allocation, producing machine code.
CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
Instruction* instr) {
IA32OperandConverter i(this, instr);
InstructionCode opcode = instr->opcode();
ArchOpcode arch_opcode = ArchOpcodeField::decode(opcode);
switch (arch_opcode) {
case kArchCallCodeObject: {
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = i.InputCode(0);
__ call(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchCallWasmFunction: {
if (HasImmediateInput(instr, 0)) {
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt32());
if (info()->IsWasm()) {
__ wasm_call(wasm_code, RelocInfo::WASM_CALL);
} else {
__ call(wasm_code, RelocInfo::JS_TO_WASM_CALL);
}
} else {
Register reg = i.InputRegister(0);
__ call(reg);
}
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchTailCallCodeObjectFromJSFunction:
case kArchTailCallCodeObject: {
if (arch_opcode == kArchTailCallCodeObjectFromJSFunction) {
AssemblePopArgumentsAdaptorFrame(kJavaScriptCallArgCountRegister,
no_reg, no_reg, no_reg);
}
if (HasImmediateInput(instr, 0)) {
Handle<Code> code = i.InputCode(0);
__ jmp(code, RelocInfo::CODE_TARGET);
} else {
Register reg = i.InputRegister(0);
__ add(reg, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ jmp(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchTailCallWasm: {
if (HasImmediateInput(instr, 0)) {
Address wasm_code = reinterpret_cast<Address>(
i.ToConstant(instr->InputAt(0)).ToInt32());
if (info()->IsWasm()) {
__ jmp(wasm_code, RelocInfo::WASM_CALL);
} else {
__ jmp(wasm_code, RelocInfo::JS_TO_WASM_CALL);
}
} else {
Register reg = i.InputRegister(0);
__ jmp(reg);
}
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchTailCallAddress: {
CHECK(!HasImmediateInput(instr, 0));
Register reg = i.InputRegister(0);
__ jmp(reg);
frame_access_state()->ClearSPDelta();
frame_access_state()->SetFrameAccessToDefault();
break;
}
case kArchCallJSFunction: {
Register func = i.InputRegister(0);
if (FLAG_debug_code) {
// Check the function's context matches the context argument.
__ cmp(esi, FieldOperand(func, JSFunction::kContextOffset));
__ Assert(equal, AbortReason::kWrongFunctionContext);
}
__ mov(ecx, FieldOperand(func, JSFunction::kCodeOffset));
__ add(ecx, Immediate(Code::kHeaderSize - kHeapObjectTag));
__ call(ecx);
RecordCallPosition(instr);
frame_access_state()->ClearSPDelta();
break;
}
case kArchPrepareCallCFunction: {
// Frame alignment requires using FP-relative frame addressing.
frame_access_state()->SetFrameAccessToFP();
int const num_parameters = MiscField::decode(instr->opcode());
__ PrepareCallCFunction(num_parameters, i.TempRegister(0));
break;
}
case kArchSaveCallerRegisters: {
fp_mode_ =
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode()));
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// kReturnRegister0 should have been saved before entering the stub.
int bytes = __ PushCallerSaved(fp_mode_, kReturnRegister0);
DCHECK_EQ(0, bytes % kPointerSize);
DCHECK_EQ(0, frame_access_state()->sp_delta());
frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
DCHECK(!caller_registers_saved_);
caller_registers_saved_ = true;
break;
}
case kArchRestoreCallerRegisters: {
DCHECK(fp_mode_ ==
static_cast<SaveFPRegsMode>(MiscField::decode(instr->opcode())));
DCHECK(fp_mode_ == kDontSaveFPRegs || fp_mode_ == kSaveFPRegs);
// Don't overwrite the returned value.
int bytes = __ PopCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(-(bytes / kPointerSize));
DCHECK_EQ(0, frame_access_state()->sp_delta());
DCHECK(caller_registers_saved_);
caller_registers_saved_ = false;
break;
}
case kArchPrepareTailCall:
AssemblePrepareTailCall();
break;
case kArchCallCFunction: {
int const num_parameters = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
ExternalReference ref = i.InputExternalReference(0);
__ CallCFunction(ref, num_parameters);
} else {
Register func = i.InputRegister(0);
__ CallCFunction(func, num_parameters);
}
frame_access_state()->SetFrameAccessToDefault();
// Ideally, we should decrement SP delta to match the change of stack
// pointer in CallCFunction. However, for certain architectures (e.g.
// ARM), there may be stricter alignment requirements, causing the old SP
// to be saved on the stack. In those cases, we cannot calculate the SP
// delta statically.
frame_access_state()->ClearSPDelta();
if (caller_registers_saved_) {
// Need to re-sync SP delta introduced in kArchSaveCallerRegisters.
// Here, we assume the sequence to be:
// kArchSaveCallerRegisters;
// kArchCallCFunction;
// kArchRestoreCallerRegisters;
int bytes =
__ RequiredStackSizeForCallerSaved(fp_mode_, kReturnRegister0);
frame_access_state()->IncreaseSPDelta(bytes / kPointerSize);
}
break;
}
case kArchJmp:
AssembleArchJump(i.InputRpo(0));
break;
case kArchLookupSwitch:
AssembleArchLookupSwitch(instr);
break;
case kArchTableSwitch:
AssembleArchTableSwitch(instr);
break;
case kArchComment: {
Address comment_string = i.InputExternalReference(0).address();
__ RecordComment(reinterpret_cast<const char*>(comment_string));
break;
}
case kArchDebugAbort:
DCHECK(i.InputRegister(0) == edx);
if (!frame_access_state()->has_frame()) {
// We don't actually want to generate a pile of code for this, so just
// claim there is a stack frame, without generating one.
FrameScope scope(tasm(), StackFrame::NONE);
__ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
RelocInfo::CODE_TARGET);
} else {
__ Call(isolate()->builtins()->builtin_handle(Builtins::kAbortJS),
RelocInfo::CODE_TARGET);
}
__ int3();
break;
case kArchDebugBreak:
__ int3();
break;
case kArchNop:
case kArchThrowTerminator:
// don't emit code for nops.
break;
case kArchDeoptimize: {
int deopt_state_id =
BuildTranslation(instr, -1, 0, OutputFrameStateCombine::Ignore());
CodeGenResult result =
AssembleDeoptimizerCall(deopt_state_id, current_source_position_);
if (result != kSuccess) return result;
break;
}
case kArchRet:
AssembleReturn(instr->InputAt(0));
break;
case kArchStackPointer:
__ mov(i.OutputRegister(), esp);
break;
case kArchFramePointer:
__ mov(i.OutputRegister(), ebp);
break;
case kArchParentFramePointer:
if (frame_access_state()->has_frame()) {
__ mov(i.OutputRegister(), Operand(ebp, 0));
} else {
__ mov(i.OutputRegister(), ebp);
}
break;
case kArchTruncateDoubleToI: {
auto result = i.OutputRegister();
auto input = i.InputDoubleRegister(0);
auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
__ cvttsd2si(result, Operand(input));
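// cvttsd2si yields 0x80000000 (INT32_MIN) on overflow and NaN; comparing the
// result with 1 sets the overflow flag exactly in that case, diverting to the
// slow out-of-line truncation.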
__ cmp(result, 1);
__ j(overflow, ool->entry());
__ bind(ool->exit());
break;
}
case kArchStoreWithWriteBarrier: {
RecordWriteMode mode =
static_cast<RecordWriteMode>(MiscField::decode(instr->opcode()));
Register object = i.InputRegister(0);
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
Register value = i.InputRegister(index);
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
auto ool = new (zone()) OutOfLineRecordWrite(this, object, operand, value,
scratch0, scratch1, mode);
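// Perform the store, then take the out-of-line write barrier only if the
// object's page is flagged as needing it; the out-of-line code additionally
// filters Smi values and values on uninteresting pages.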
__ mov(operand, value);
__ CheckPageFlag(object, scratch0,
MemoryChunk::kPointersFromHereAreInterestingMask,
not_zero, ool->entry());
__ bind(ool->exit());
break;
}
case kArchStackSlot: {
FrameOffset offset =
frame_access_state()->GetFrameOffset(i.InputInt32(0));
Register base = offset.from_stack_pointer() ? esp : ebp;
__ lea(i.OutputRegister(), Operand(base, offset.offset()));
break;
}
case kIeee754Float64Acos:
ASSEMBLE_IEEE754_UNOP(acos);
break;
case kIeee754Float64Acosh:
ASSEMBLE_IEEE754_UNOP(acosh);
break;
case kIeee754Float64Asin:
ASSEMBLE_IEEE754_UNOP(asin);
break;
case kIeee754Float64Asinh:
ASSEMBLE_IEEE754_UNOP(asinh);
break;
case kIeee754Float64Atan:
ASSEMBLE_IEEE754_UNOP(atan);
break;
case kIeee754Float64Atanh:
ASSEMBLE_IEEE754_UNOP(atanh);
break;
case kIeee754Float64Atan2:
ASSEMBLE_IEEE754_BINOP(atan2);
break;
case kIeee754Float64Cbrt:
ASSEMBLE_IEEE754_UNOP(cbrt);
break;
case kIeee754Float64Cos:
ASSEMBLE_IEEE754_UNOP(cos);
break;
case kIeee754Float64Cosh:
ASSEMBLE_IEEE754_UNOP(cosh);
break;
case kIeee754Float64Expm1:
ASSEMBLE_IEEE754_UNOP(expm1);
break;
case kIeee754Float64Exp:
ASSEMBLE_IEEE754_UNOP(exp);
break;
case kIeee754Float64Log:
ASSEMBLE_IEEE754_UNOP(log);
break;
case kIeee754Float64Log1p:
ASSEMBLE_IEEE754_UNOP(log1p);
break;
case kIeee754Float64Log2:
ASSEMBLE_IEEE754_UNOP(log2);
break;
case kIeee754Float64Log10:
ASSEMBLE_IEEE754_UNOP(log10);
break;
case kIeee754Float64Pow: {
// TODO(bmeurer): Improve integration of the stub.
if (i.InputDoubleRegister(1) != xmm2) {
__ movaps(xmm2, i.InputDoubleRegister(0));
__ movaps(xmm1, i.InputDoubleRegister(1));
} else {
__ movaps(xmm0, i.InputDoubleRegister(0));
__ movaps(xmm1, xmm2);
__ movaps(xmm2, xmm0);
}
__ CallStubDelayed(new (zone())
MathPowStub(nullptr, MathPowStub::DOUBLE));
__ movaps(i.OutputDoubleRegister(), xmm3);
break;
}
case kIeee754Float64Sin:
ASSEMBLE_IEEE754_UNOP(sin);
break;
case kIeee754Float64Sinh:
ASSEMBLE_IEEE754_UNOP(sinh);
break;
case kIeee754Float64Tan:
ASSEMBLE_IEEE754_UNOP(tan);
break;
case kIeee754Float64Tanh:
ASSEMBLE_IEEE754_UNOP(tanh);
break;
case kIA32Add:
ASSEMBLE_BINOP(add);
break;
case kIA32And:
ASSEMBLE_BINOP(and_);
break;
case kIA32Cmp:
ASSEMBLE_COMPARE(cmp);
break;
case kIA32Cmp16:
ASSEMBLE_COMPARE(cmpw);
break;
case kIA32Cmp8:
ASSEMBLE_COMPARE(cmpb);
break;
case kIA32Test:
ASSEMBLE_COMPARE(test);
break;
case kIA32Test16:
ASSEMBLE_COMPARE(test_w);
break;
case kIA32Test8:
ASSEMBLE_COMPARE(test_b);
break;
case kIA32Imul:
if (HasImmediateInput(instr, 1)) {
__ imul(i.OutputRegister(), i.InputOperand(0), i.InputInt32(1));
} else {
__ imul(i.OutputRegister(), i.InputOperand(1));
}
break;
case kIA32ImulHigh:
__ imul(i.InputRegister(1));
break;
case kIA32UmulHigh:
__ mul(i.InputRegister(1));
break;
case kIA32Idiv:
__ cdq();
__ idiv(i.InputOperand(1));
break;
case kIA32Udiv:
__ Move(edx, Immediate(0));
__ div(i.InputOperand(1));
break;
case kIA32Not:
__ not_(i.OutputOperand());
break;
case kIA32Neg:
__ neg(i.OutputOperand());
break;
case kIA32Or:
ASSEMBLE_BINOP(or_);
break;
case kIA32Xor:
ASSEMBLE_BINOP(xor_);
break;
case kIA32Sub:
ASSEMBLE_BINOP(sub);
break;
case kIA32Shl:
if (HasImmediateInput(instr, 1)) {
__ shl(i.OutputOperand(), i.InputInt5(1));
} else {
__ shl_cl(i.OutputOperand());
}
break;
case kIA32Shr:
if (HasImmediateInput(instr, 1)) {
__ shr(i.OutputOperand(), i.InputInt5(1));
} else {
__ shr_cl(i.OutputOperand());
}
break;
case kIA32Sar:
if (HasImmediateInput(instr, 1)) {
__ sar(i.OutputOperand(), i.InputInt5(1));
} else {
__ sar_cl(i.OutputOperand());
}
break;
case kIA32AddPair: {
// i.OutputRegister(0) == i.InputRegister(0) ... left low word.
// i.InputRegister(1) ... left high word.
// i.InputRegister(2) ... right low word.
// i.InputRegister(3) ... right high word.
bool use_temp = false;
if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
i.OutputRegister(0).code() == i.InputRegister(3).code()) {
// We cannot write to the output register directly, because it would
// overwrite an input for adc. We have to use the temp register.
use_temp = true;
__ Move(i.TempRegister(0), i.InputRegister(0));
__ add(i.TempRegister(0), i.InputRegister(2));
} else {
__ add(i.OutputRegister(0), i.InputRegister(2));
}
if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
__ Move(i.OutputRegister(1), i.InputRegister(1));
}
__ adc(i.OutputRegister(1), Operand(i.InputRegister(3)));
if (use_temp) {
__ Move(i.OutputRegister(0), i.TempRegister(0));
}
break;
}
case kIA32SubPair: {
// i.OutputRegister(0) == i.InputRegister(0) ... left low word.
// i.InputRegister(1) ... left high word.
// i.InputRegister(2) ... right low word.
// i.InputRegister(3) ... right high word.
bool use_temp = false;
if (i.OutputRegister(0).code() == i.InputRegister(1).code() ||
i.OutputRegister(0).code() == i.InputRegister(3).code()) {
// We cannot write to the output register directly, because it would
// overwrite an input for sbb. We have to use the temp register.
use_temp = true;
__ Move(i.TempRegister(0), i.InputRegister(0));
__ sub(i.TempRegister(0), i.InputRegister(2));
} else {
__ sub(i.OutputRegister(0), i.InputRegister(2));
}
if (i.OutputRegister(1).code() != i.InputRegister(1).code()) {
__ Move(i.OutputRegister(1), i.InputRegister(1));
}
__ sbb(i.OutputRegister(1), Operand(i.InputRegister(3)));
if (use_temp) {
__ Move(i.OutputRegister(0), i.TempRegister(0));
}
break;
}
case kIA32MulPair: {
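// 64-bit multiply from 32-bit halves: the high output word accumulates both
// cross products (left_low * right_high and left_high * right_low) plus the
// high half of left_low * right_low produced by the unsigned mul below.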
__ imul(i.OutputRegister(1), i.InputOperand(0));
__ mov(i.TempRegister(0), i.InputOperand(1));
__ imul(i.TempRegister(0), i.InputOperand(2));
__ add(i.OutputRegister(1), i.TempRegister(0));
__ mov(i.OutputRegister(0), i.InputOperand(0));
// Multiplies the low words; mul leaves the 64-bit product in edx:eax.
__ mul(i.InputRegister(2));
__ add(i.OutputRegister(1), i.TempRegister(0));
break;
}
case kIA32ShlPair:
if (HasImmediateInput(instr, 2)) {
__ ShlPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
} else {
// Shift has been loaded into CL by the register allocator.
__ ShlPair_cl(i.InputRegister(1), i.InputRegister(0));
}
break;
case kIA32ShrPair:
if (HasImmediateInput(instr, 2)) {
__ ShrPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
} else {
// Shift has been loaded into CL by the register allocator.
__ ShrPair_cl(i.InputRegister(1), i.InputRegister(0));
}
break;
case kIA32SarPair:
if (HasImmediateInput(instr, 2)) {
__ SarPair(i.InputRegister(1), i.InputRegister(0), i.InputInt6(2));
} else {
// Shift has been loaded into CL by the register allocator.
__ SarPair_cl(i.InputRegister(1), i.InputRegister(0));
}
break;
case kIA32Ror:
if (HasImmediateInput(instr, 1)) {
__ ror(i.OutputOperand(), i.InputInt5(1));
} else {
__ ror_cl(i.OutputOperand());
}
break;
case kIA32Lzcnt:
__ Lzcnt(i.OutputRegister(), i.InputOperand(0));
break;
case kIA32Tzcnt:
__ Tzcnt(i.OutputRegister(), i.InputOperand(0));
break;
case kIA32Popcnt:
__ Popcnt(i.OutputRegister(), i.InputOperand(0));
break;
case kLFence:
__ lfence();
break;
case kSSEFloat32Cmp:
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
case kSSEFloat32Add:
__ addss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
case kSSEFloat32Sub:
__ subss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
case kSSEFloat32Mul:
__ mulss(i.InputDoubleRegister(0), i.InputOperand(1));
break;
case kSSEFloat32Div:
__ divss(i.InputDoubleRegister(0), i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kSSEFloat32Sqrt:
__ sqrtss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat32Abs: {
// TODO(bmeurer): Use 128-bit constants.
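// All-ones shifted right by 33 leaves 0x7FFFFFFF in the low 32 bits of each
// 64-bit lane; ANDing with it clears the float's sign bit.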
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 33);
__ andps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat32Neg: {
// TODO(bmeurer): Use 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 31);
__ xorps(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat32Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ roundss(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
case kSSEFloat64Cmp:
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
case kSSEFloat64Add:
__ addsd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
case kSSEFloat64Sub:
__ subsd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
case kSSEFloat64Mul:
__ mulsd(i.InputDoubleRegister(0), i.InputOperand(1));
break;
case kSSEFloat64Div:
__ divsd(i.InputDoubleRegister(0), i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
case kSSEFloat32Max: {
Label compare_nan, compare_swap, done_compare;
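// An unordered compare (parity set) means one input is NaN, which is handled
// out of line. If the inputs compare equal they may still be +0 and -0, so
// check the sign bit of the first input and swap when it is -0 so that +0
// wins for max.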
if (instr->InputAt(1)->IsFPRegister()) {
__ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(above, &done_compare, Label::kNear);
__ j(below, &compare_swap, Label::kNear);
__ movmskps(i.TempRegister(0), i.InputDoubleRegister(0));
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
__ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ movss(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
case kSSEFloat64Max: {
Label compare_nan, compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
__ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(above, &done_compare, Label::kNear);
__ j(below, &compare_swap, Label::kNear);
__ movmskpd(i.TempRegister(0), i.InputDoubleRegister(0));
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
__ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
case kSSEFloat32Min: {
Label compare_swap, done_compare;
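// Symmetric to kSSEFloat32Max: NaN takes the out-of-line path, and on an
// equal compare the sign bit of the second input decides the swap so that -0
// wins for min.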
if (instr->InputAt(1)->IsFPRegister()) {
__ ucomiss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ ucomiss(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
new (zone()) OutOfLineLoadFloat32NaN(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(below, &done_compare, Label::kNear);
__ j(above, &compare_swap, Label::kNear);
if (instr->InputAt(1)->IsFPRegister()) {
__ movmskps(i.TempRegister(0), i.InputDoubleRegister(1));
} else {
__ movss(kScratchDoubleReg, i.InputOperand(1));
__ movmskps(i.TempRegister(0), kScratchDoubleReg);
}
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
__ movss(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ movss(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
case kSSEFloat64Min: {
Label compare_swap, done_compare;
if (instr->InputAt(1)->IsFPRegister()) {
__ ucomisd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ ucomisd(i.InputDoubleRegister(0), i.InputOperand(1));
}
auto ool =
new (zone()) OutOfLineLoadFloat64NaN(this, i.OutputDoubleRegister());
__ j(parity_even, ool->entry());
__ j(below, &done_compare, Label::kNear);
__ j(above, &compare_swap, Label::kNear);
if (instr->InputAt(1)->IsFPRegister()) {
__ movmskpd(i.TempRegister(0), i.InputDoubleRegister(1));
} else {
__ movsd(kScratchDoubleReg, i.InputOperand(1));
__ movmskpd(i.TempRegister(0), kScratchDoubleReg);
}
__ test(i.TempRegister(0), Immediate(1));
__ j(zero, &done_compare, Label::kNear);
__ bind(&compare_swap);
if (instr->InputAt(1)->IsFPRegister()) {
__ movsd(i.InputDoubleRegister(0), i.InputDoubleRegister(1));
} else {
__ movsd(i.InputDoubleRegister(0), i.InputOperand(1));
}
__ bind(&done_compare);
__ bind(ool->exit());
break;
}
case kSSEFloat64Mod: {
// TODO(dcarney): alignment is wrong.
__ sub(esp, Immediate(kDoubleSize));
// Move values to st(0) and st(1).
__ movsd(Operand(esp, 0), i.InputDoubleRegister(1));
__ fld_d(Operand(esp, 0));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
__ fld_d(Operand(esp, 0));
// Loop while fprem isn't done.
Label mod_loop;
__ bind(&mod_loop);
// This instruction traps on all kinds of inputs, but we are assuming the
// floating point control word is set to ignore them all.
__ fprem();
// The following 2 instructions implicitly use eax.
__ fnstsw_ax();
__ sahf();
__ j(parity_even, &mod_loop);
// Move output to stack and clean up.
__ fstp(1);
__ fstp_d(Operand(esp, 0));
__ movsd(i.OutputDoubleRegister(), Operand(esp, 0));
__ add(esp, Immediate(kDoubleSize));
break;
}
case kSSEFloat64Abs: {
// TODO(bmeurer): Use 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 1);
__ andpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat64Neg: {
// TODO(bmeurer): Use 128-bit constants.
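// All-ones shifted left by 63 leaves only the sign bit of each 64-bit lane
// set; XORing with it flips the double's sign.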
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 63);
__ xorpd(i.OutputDoubleRegister(), kScratchDoubleReg);
break;
}
case kSSEFloat64Sqrt:
__ sqrtsd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat64Round: {
CpuFeatureScope sse_scope(tasm(), SSE4_1);
RoundingMode const mode =
static_cast<RoundingMode>(MiscField::decode(instr->opcode()));
__ roundsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0), mode);
break;
}
case kSSEFloat32ToFloat64:
__ cvtss2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat64ToFloat32:
__ cvtsd2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat32ToInt32:
__ cvttss2si(i.OutputRegister(), i.InputOperand(0));
break;
case kSSEFloat32ToUint32: {
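// Try a signed truncation first. A negative result means the input was at
// least 2^31, so truncate input - 2^31 instead and OR the top bit back in.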
Label success;
__ cvttss2si(i.OutputRegister(), i.InputOperand(0));
__ test(i.OutputRegister(), i.OutputRegister());
__ j(positive, &success);
__ Move(kScratchDoubleReg, static_cast<float>(INT32_MIN));
__ addss(kScratchDoubleReg, i.InputOperand(0));
__ cvttss2si(i.OutputRegister(), kScratchDoubleReg);
__ or_(i.OutputRegister(), Immediate(0x80000000));
__ bind(&success);
break;
}
case kSSEFloat64ToInt32:
__ cvttsd2si(i.OutputRegister(), i.InputOperand(0));
break;
case kSSEFloat64ToUint32: {
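// Bias the input by -2^31, truncate it as a signed value, then add 2^31 back
// as an unsigned offset.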
__ Move(kScratchDoubleReg, -2147483648.0);
__ addsd(kScratchDoubleReg, i.InputOperand(0));
__ cvttsd2si(i.OutputRegister(), kScratchDoubleReg);
__ add(i.OutputRegister(), Immediate(0x80000000));
break;
}
case kSSEInt32ToFloat32:
__ cvtsi2ss(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEUint32ToFloat32: {
Register scratch0 = i.TempRegister(0);
Register scratch1 = i.TempRegister(1);
__ mov(scratch0, i.InputOperand(0));
__ Cvtui2ss(i.OutputDoubleRegister(), scratch0, scratch1);
break;
}
case kSSEInt32ToFloat64:
__ cvtsi2sd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEUint32ToFloat64:
__ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kSSEFloat64ExtractLowWord32:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
__ movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kSSEFloat64ExtractHighWord32:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0, kDoubleSize / 2));
} else {
__ Pextrd(i.OutputRegister(), i.InputDoubleRegister(0), 1);
}
break;
case kSSEFloat64InsertLowWord32:
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 0, true);
break;
case kSSEFloat64InsertHighWord32:
__ Pinsrd(i.OutputDoubleRegister(), i.InputOperand(1), 1, true);
break;
case kSSEFloat64LoadLowWord32:
__ movd(i.OutputDoubleRegister(), i.InputOperand(0));
break;
case kAVXFloat32Add: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vaddss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat32Sub: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vsubss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat32Mul: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vmulss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat32Div: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vdivss(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulss depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
case kAVXFloat64Add: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat64Sub: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat64Mul: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
break;
}
case kAVXFloat64Div: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
i.InputOperand(1));
// Don't delete this mov. It may improve performance on some CPUs,
// when there is a (v)mulsd depending on the result.
__ movaps(i.OutputDoubleRegister(), i.OutputDoubleRegister());
break;
}
case kAVXFloat32Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 33);
CpuFeatureScope avx_scope(tasm(), AVX);
__ vandps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
case kAVXFloat32Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 31);
CpuFeatureScope avx_scope(tasm(), AVX);
__ vxorps(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
case kAVXFloat64Abs: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrlq(kScratchDoubleReg, 1);
CpuFeatureScope avx_scope(tasm(), AVX);
__ vandpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
case kAVXFloat64Neg: {
// TODO(bmeurer): Use RIP relative 128-bit constants.
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psllq(kScratchDoubleReg, 63);
CpuFeatureScope avx_scope(tasm(), AVX);
__ vxorpd(i.OutputDoubleRegister(), kScratchDoubleReg, i.InputOperand(0));
break;
}
case kSSEFloat64SilenceNaN:
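// Subtracting +0 quiets a signaling NaN and leaves every other value,
// including -0, unchanged.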
__ xorpd(kScratchDoubleReg, kScratchDoubleReg);
__ subsd(i.InputDoubleRegister(0), kScratchDoubleReg);
break;
case kIA32Movsxbl:
__ movsx_b(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32Movzxbl:
__ movzx_b(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32Movb: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov_b(operand, i.InputInt8(index));
} else {
__ mov_b(operand, i.InputRegister(index));
}
break;
}
case kIA32Movsxwl:
__ movsx_w(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32Movzxwl:
__ movzx_w(i.OutputRegister(), i.MemoryOperand());
break;
case kIA32Movw: {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov_w(operand, i.InputInt16(index));
} else {
__ mov_w(operand, i.InputRegister(index));
}
break;
}
case kIA32Movl:
if (instr->HasOutput()) {
__ mov(i.OutputRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
if (HasImmediateInput(instr, index)) {
__ mov(operand, i.InputImmediate(index));
} else {
__ mov(operand, i.InputRegister(index));
}
}
break;
case kIA32Movsd:
if (instr->HasOutput()) {
__ movsd(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ movsd(operand, i.InputDoubleRegister(index));
}
break;
case kIA32Movss:
if (instr->HasOutput()) {
__ movss(i.OutputDoubleRegister(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ movss(operand, i.InputDoubleRegister(index));
}
break;
case kIA32Movdqu:
if (instr->HasOutput()) {
__ Movdqu(i.OutputSimd128Register(), i.MemoryOperand());
} else {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ Movdqu(operand, i.InputSimd128Register(index));
}
break;
case kIA32BitcastFI:
if (instr->InputAt(0)->IsFPStackSlot()) {
__ mov(i.OutputRegister(), i.InputOperand(0));
} else {
__ movd(i.OutputRegister(), i.InputDoubleRegister(0));
}
break;
case kIA32BitcastIF:
if (instr->InputAt(0)->IsRegister()) {
__ movd(i.OutputDoubleRegister(), i.InputRegister(0));
} else {
__ movss(i.OutputDoubleRegister(), i.InputOperand(0));
}
break;
case kIA32Lea: {
AddressingMode mode = AddressingModeField::decode(instr->opcode());
// Shorten "leal" to "addl", "subl" or "shll" if the register allocation
// and addressing mode just happens to work out. The "addl"/"subl" forms
// in these cases are faster based on measurements.
if (mode == kMode_MI) {
__ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
} else if (i.InputRegister(0) == i.OutputRegister()) {
if (mode == kMode_MRI) {
int32_t constant_summand = i.InputInt32(1);
if (constant_summand > 0) {
__ add(i.OutputRegister(), Immediate(constant_summand));
} else if (constant_summand < 0) {
__ sub(i.OutputRegister(), Immediate(-constant_summand));
}
} else if (mode == kMode_MR1) {
if (i.InputRegister(1) == i.OutputRegister()) {
__ shl(i.OutputRegister(), 1);
} else {
__ add(i.OutputRegister(), i.InputRegister(1));
}
} else if (mode == kMode_M2) {
__ shl(i.OutputRegister(), 1);
} else if (mode == kMode_M4) {
__ shl(i.OutputRegister(), 2);
} else if (mode == kMode_M8) {
__ shl(i.OutputRegister(), 3);
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
} else if (mode == kMode_MR1 &&
i.InputRegister(1) == i.OutputRegister()) {
__ add(i.OutputRegister(), i.InputRegister(0));
} else {
__ lea(i.OutputRegister(), i.MemoryOperand());
}
break;
}
case kIA32PushFloat32:
if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kFloatSize));
__ movss(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ Move(kScratchDoubleReg, i.InputFloat32(0));
__ sub(esp, Immediate(kFloatSize));
__ movss(Operand(esp, 0), kScratchDoubleReg);
frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else {
__ movss(kScratchDoubleReg, i.InputOperand(0));
__ sub(esp, Immediate(kFloatSize));
__ movss(Operand(esp, 0), kScratchDoubleReg);
frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
}
break;
case kIA32PushFloat64:
if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ Move(kScratchDoubleReg, i.InputDouble(0));
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), kScratchDoubleReg);
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
__ movsd(kScratchDoubleReg, i.InputOperand(0));
__ sub(esp, Immediate(kDoubleSize));
__ movsd(Operand(esp, 0), kScratchDoubleReg);
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
}
break;
case kIA32PushSimd128:
if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kSimd128Size));
__ movups(Operand(esp, 0), i.InputSimd128Register(0));
} else {
__ movups(kScratchDoubleReg, i.InputOperand(0));
__ sub(esp, Immediate(kSimd128Size));
__ movups(Operand(esp, 0), kScratchDoubleReg);
}
frame_access_state()->IncreaseSPDelta(kSimd128Size / kPointerSize);
break;
case kIA32Push:
if (AddressingModeField::decode(instr->opcode()) != kMode_None) {
size_t index = 0;
Operand operand = i.MemoryOperand(&index);
__ push(operand);
frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else if (instr->InputAt(0)->IsFPRegister()) {
__ sub(esp, Immediate(kFloatSize));
__ movsd(Operand(esp, 0), i.InputDoubleRegister(0));
frame_access_state()->IncreaseSPDelta(kFloatSize / kPointerSize);
} else if (HasImmediateInput(instr, 0)) {
__ push(i.InputImmediate(0));
frame_access_state()->IncreaseSPDelta(1);
} else {
__ push(i.InputOperand(0));
frame_access_state()->IncreaseSPDelta(1);
}
break;
case kIA32Poke: {
int slot = MiscField::decode(instr->opcode());
if (HasImmediateInput(instr, 0)) {
__ mov(Operand(esp, slot * kPointerSize), i.InputImmediate(0));
} else {
__ mov(Operand(esp, slot * kPointerSize), i.InputRegister(0));
}
break;
}
case kIA32Peek: {
int reverse_slot = i.InputInt32(0) + 1;
int offset =
FrameSlotToFPOffset(frame()->GetTotalFrameSlotCount() - reverse_slot);
if (instr->OutputAt(0)->IsFPRegister()) {
LocationOperand* op = LocationOperand::cast(instr->OutputAt(0));
if (op->representation() == MachineRepresentation::kFloat64) {
__ movsd(i.OutputDoubleRegister(), Operand(ebp, offset));
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ movss(i.OutputFloatRegister(), Operand(ebp, offset));
}
} else {
__ mov(i.OutputRegister(), Operand(ebp, offset));
}
break;
}
case kSSEF32x4Splat: {
DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
XMMRegister dst = i.OutputSimd128Register();
__ shufps(dst, dst, 0x0);
break;
}
case kAVXF32x4Splat: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src = i.InputFloatRegister(0);
__ vshufps(i.OutputSimd128Register(), src, src, 0x0);
break;
}
case kSSEF32x4ExtractLane: {
DCHECK_EQ(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
XMMRegister dst = i.OutputFloatRegister();
int8_t lane = i.InputInt8(1);
if (lane != 0) {
DCHECK_LT(lane, 4);
__ shufps(dst, dst, lane);
}
break;
}
case kAVXF32x4ExtractLane: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister dst = i.OutputFloatRegister();
XMMRegister src = i.InputSimd128Register(0);
int8_t lane = i.InputInt8(1);
if (lane == 0) {
if (dst != src) __ vmovaps(dst, src);
} else {
DCHECK_LT(lane, 4);
__ vshufps(dst, src, src, lane);
}
break;
}
case kSSEF32x4ReplaceLane: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ insertps(i.OutputSimd128Register(), i.InputOperand(2),
i.InputInt8(1) << 4);
break;
}
case kAVXF32x4ReplaceLane: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vinsertps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(2), i.InputInt8(1) << 4);
break;
}
case kSSEF32x4Abs: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
if (src.is_reg(dst)) {
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ psrld(kScratchDoubleReg, 1);
__ andps(dst, kScratchDoubleReg);
} else {
__ pcmpeqd(dst, dst);
__ psrld(dst, 1);
__ andps(dst, src);
}
break;
}
case kAVXF32x4Abs: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpsrld(kScratchDoubleReg, kScratchDoubleReg, 1);
__ vandps(i.OutputSimd128Register(), kScratchDoubleReg,
i.InputOperand(0));
break;
}
case kSSEF32x4Neg: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
if (src.is_reg(dst)) {
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ pslld(kScratchDoubleReg, 31);
__ xorps(dst, kScratchDoubleReg);
} else {
__ pcmpeqd(dst, dst);
__ pslld(dst, 31);
__ xorps(dst, src);
}
break;
}
case kAVXF32x4Neg: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpslld(kScratchDoubleReg, kScratchDoubleReg, 31);
__ vxorps(i.OutputSimd128Register(), kScratchDoubleReg,
i.InputOperand(0));
break;
}
case kSSEF32x4Add: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ addps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXF32x4Add: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vaddps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEF32x4Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ subps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXF32x4Sub: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vsubps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEF32x4Mul: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ mulps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXF32x4Mul: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vmulps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEF32x4Min: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ minps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXF32x4Min: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vminps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEF32x4Max: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ maxps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXF32x4Max: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vmaxps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEF32x4Eq: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ cmpeqps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXF32x4Eq: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vcmpeqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEF32x4Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ cmpneqps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXF32x4Ne: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vcmpneqps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEF32x4Lt: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ cmpltps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXF32x4Lt: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vcmpltps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEF32x4Le: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ cmpleps(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXF32x4Le: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vcmpleps(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kIA32I32x4Splat: {
XMMRegister dst = i.OutputSimd128Register();
__ Movd(dst, i.InputOperand(0));
__ Pshufd(dst, dst, 0x0);
break;
}
case kIA32I32x4ExtractLane: {
__ Pextrd(i.OutputRegister(), i.InputSimd128Register(0), i.InputInt8(1));
break;
}
case kSSEI32x4ReplaceLane: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pinsrd(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
break;
}
case kAVXI32x4ReplaceLane: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpinsrd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(2), i.InputInt8(1));
break;
}
case kIA32I32x4Neg: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
if (src.is_reg(dst)) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Psignd(dst, kScratchDoubleReg);
} else {
__ Pxor(dst, dst);
__ Psubd(dst, src);
}
break;
}
case kSSEI32x4Shl: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pslld(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kAVXI32x4Shl: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpslld(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kSSEI32x4ShrS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psrad(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kAVXI32x4ShrS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsrad(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kSSEI32x4Add: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4Add: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI32x4Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4Sub: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI32x4Mul: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmulld(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4Mul: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpmulld(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI32x4MinS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminsd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4MinS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpminsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI32x4MaxS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxsd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4MaxS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpmaxsd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI32x4Eq: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4Eq: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI32x4Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqd(i.OutputSimd128Register(), i.InputOperand(1));
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kAVXI32x4Ne: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
kScratchDoubleReg);
break;
}
case kSSEI32x4GtS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpgtd(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4GtS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpgtd(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI32x4GeS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
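// No packed signed greater-or-equal: a >= b is computed as min(a, b) == b.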
__ pminsd(dst, src);
__ pcmpeqd(dst, src);
break;
}
case kAVXI32x4GeS: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminsd(kScratchDoubleReg, src1, src2);
__ vpcmpeqd(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
case kSSEI32x4ShrU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psrld(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kAVXI32x4ShrU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsrld(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kSSEI32x4MinU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminud(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4MinU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpminud(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI32x4MaxU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxud(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI32x4MaxU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpmaxud(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI32x4GtU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
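// No packed unsigned compare: a > b is computed as NOT(max(a, b) == b).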
__ pmaxud(dst, src);
__ pcmpeqd(dst, src);
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(dst, kScratchDoubleReg);
break;
}
case kAVXI32x4GtU: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpmaxud(kScratchDoubleReg, src1, src2);
__ vpcmpeqd(dst, kScratchDoubleReg, src2);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpxor(dst, dst, kScratchDoubleReg);
break;
}
case kSSEI32x4GeU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
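// Unsigned a >= b is computed as min(a, b) == b.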
__ pminud(dst, src);
__ pcmpeqd(dst, src);
break;
}
case kAVXI32x4GeU: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminud(kScratchDoubleReg, src1, src2);
__ vpcmpeqd(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
case kIA32I16x8Splat: {
XMMRegister dst = i.OutputSimd128Register();
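// Broadcast the low word: duplicate it across the low quadword, then
// duplicate that doubleword across the whole register.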
__ Movd(dst, i.InputOperand(0));
__ Pshuflw(dst, dst, 0x0);
__ Pshufd(dst, dst, 0x0);
break;
}
case kIA32I16x8ExtractLane: {
Register dst = i.OutputRegister();
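// pextrw zero-extends into the destination; sign-extend to recover the
// signed lane value.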
__ Pextrw(dst, i.InputSimd128Register(0), i.InputInt8(1));
__ movsx_w(dst, dst);
break;
}
case kSSEI16x8ReplaceLane: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pinsrw(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
break;
}
case kAVXI16x8ReplaceLane: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpinsrw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(2), i.InputInt8(1));
break;
}
case kIA32I16x8Neg: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
if (src.is_reg(dst)) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Psignw(dst, kScratchDoubleReg);
} else {
__ Pxor(dst, dst);
__ Psubw(dst, src);
}
break;
}
case kSSEI16x8Shl: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psllw(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kAVXI16x8Shl: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsllw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kSSEI16x8ShrS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psraw(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kAVXI16x8ShrS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsraw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kSSEI16x8Add: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8Add: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8AddSaturateS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddsw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8AddSaturateS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8Sub: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8SubSaturateS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubsw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8SubSaturateS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8Mul: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pmullw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8Mul: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpmullw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8MinS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pminsw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8MinS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpminsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8MaxS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pmaxsw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8MaxS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpmaxsw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8Eq: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8Eq: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqw(i.OutputSimd128Register(), i.InputOperand(1));
__ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kAVXI16x8Ne: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
__ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
kScratchDoubleReg);
break;
}
case kSSEI16x8GtS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpgtw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8GtS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpgtw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8GeS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pminsw(dst, src);
__ pcmpeqw(dst, src);
break;
}
case kAVXI16x8GeS: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminsw(kScratchDoubleReg, src1, src2);
__ vpcmpeqw(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
case kSSEI16x8ShrU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psrlw(i.OutputSimd128Register(), i.InputInt8(1));
break;
}
case kAVXI16x8ShrU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsrlw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputInt8(1));
break;
}
case kSSEI16x8AddSaturateU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddusw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8AddSaturateU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8SubSaturateU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubusw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8SubSaturateU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubusw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8MinU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminuw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8MinU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpminuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8MaxU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxuw(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI16x8MaxU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpmaxuw(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI16x8GtU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pmaxuw(dst, src);
__ pcmpeqw(dst, src);
__ pcmpeqw(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(dst, kScratchDoubleReg);
break;
}
case kAVXI16x8GtU: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpmaxuw(kScratchDoubleReg, src1, src2);
__ vpcmpeqw(dst, kScratchDoubleReg, src2);
__ vpcmpeqw(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpxor(dst, dst, kScratchDoubleReg);
break;
}
case kSSEI16x8GeU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pminuw(dst, src);
__ pcmpeqw(dst, src);
break;
}
case kAVXI16x8GeU: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminuw(kScratchDoubleReg, src1, src2);
__ vpcmpeqw(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
case kIA32I8x16Splat: {
XMMRegister dst = i.OutputSimd128Register();
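// pshufb with an all-zero shuffle mask replicates byte 0 into every lane.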
__ Movd(dst, i.InputOperand(0));
__ Pxor(kScratchDoubleReg, kScratchDoubleReg);
__ Pshufb(dst, kScratchDoubleReg);
break;
}
case kIA32I8x16ExtractLane: {
Register dst = i.OutputRegister();
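// pextrb zero-extends into the destination; sign-extend to recover the
// signed lane value.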
__ Pextrb(dst, i.InputSimd128Register(0), i.InputInt8(1));
__ movsx_b(dst, dst);
break;
}
case kSSEI8x16ReplaceLane: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pinsrb(i.OutputSimd128Register(), i.InputOperand(2), i.InputInt8(1));
break;
}
case kAVXI8x16ReplaceLane: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpinsrb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(2), i.InputInt8(1));
break;
}
case kIA32I8x16Neg: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
if (src.is_reg(dst)) {
__ Pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ Psignb(dst, kScratchDoubleReg);
} else {
__ Pxor(dst, dst);
__ Psubb(dst, src);
}
break;
}
case kSSEI8x16Add: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16Add: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16AddSaturateS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddsb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16AddSaturateS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16Sub: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16Sub: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16SubSaturateS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubsb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16SubSaturateS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16MinS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pminsb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16MinS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpminsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16MaxS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
__ pmaxsb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16MaxS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpmaxsb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16Eq: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16Eq: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16Ne: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpeqb(i.OutputSimd128Register(), i.InputOperand(1));
__ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(i.OutputSimd128Register(), kScratchDoubleReg);
break;
}
case kAVXI8x16Ne: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
__ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpxor(i.OutputSimd128Register(), i.OutputSimd128Register(),
kScratchDoubleReg);
break;
}
case kSSEI8x16GtS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pcmpgtb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16GtS: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpgtb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16GeS: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
CpuFeatureScope sse_scope(tasm(), SSE4_1);
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pminsb(dst, src);
__ pcmpeqb(dst, src);
break;
}
case kAVXI8x16GeS: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminsb(kScratchDoubleReg, src1, src2);
__ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
case kSSEI8x16AddSaturateU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ paddusb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16AddSaturateU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpaddusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16SubSaturateU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ psubusb(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16SubSaturateU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpsubusb(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16MinU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pminub(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16MinU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpminub(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16MaxU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pmaxub(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXI8x16MaxU: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpmaxub(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSEI8x16GtU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pmaxub(dst, src);
__ pcmpeqb(dst, src);
__ pcmpeqb(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(dst, kScratchDoubleReg);
break;
}
case kAVXI8x16GtU: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister dst = i.OutputSimd128Register();
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpmaxub(kScratchDoubleReg, src1, src2);
__ vpcmpeqb(dst, kScratchDoubleReg, src2);
__ vpcmpeqb(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpxor(dst, dst, kScratchDoubleReg);
break;
}
case kSSEI8x16GeU: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(1);
__ pminub(dst, src);
__ pcmpeqb(dst, src);
break;
}
case kAVXI8x16GeU: {
CpuFeatureScope avx_scope(tasm(), AVX);
XMMRegister src1 = i.InputSimd128Register(0);
Operand src2 = i.InputOperand(1);
__ vpminub(kScratchDoubleReg, src1, src2);
__ vpcmpeqb(i.OutputSimd128Register(), kScratchDoubleReg, src2);
break;
}
case kIA32S128Zero: {
XMMRegister dst = i.OutputSimd128Register();
__ Pxor(dst, dst);
break;
}
case kSSES128Not: {
XMMRegister dst = i.OutputSimd128Register();
Operand src = i.InputOperand(0);
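// Bitwise NOT is XOR with all-ones; pick the operand order that leaves the
// result in dst without clobbering src.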
if (src.is_reg(dst)) {
__ pcmpeqd(kScratchDoubleReg, kScratchDoubleReg);
__ pxor(dst, kScratchDoubleReg);
} else {
__ pcmpeqd(dst, dst);
__ pxor(dst, src);
}
break;
}
case kAVXS128Not: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpcmpeqd(kScratchDoubleReg, kScratchDoubleReg, kScratchDoubleReg);
__ vpxor(i.OutputSimd128Register(), kScratchDoubleReg, i.InputOperand(0));
break;
}
case kSSES128And: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pand(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXS128And: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpand(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSES128Or: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ por(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXS128Or: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpor(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kSSES128Xor: {
DCHECK_EQ(i.OutputSimd128Register(), i.InputSimd128Register(0));
__ pxor(i.OutputSimd128Register(), i.InputOperand(1));
break;
}
case kAVXS128Xor: {
CpuFeatureScope avx_scope(tasm(), AVX);
__ vpxor(i.OutputSimd128Register(), i.InputSimd128Register(0),
i.InputOperand(1));
break;
}
case kIA32StackCheck: {
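// Compare esp against the isolate's stack limit; only the flags are
// produced here.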
ExternalReference const stack_limit =
ExternalReference::address_of_stack_limit(__ isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
break;
}
case kAtomicExchangeInt8: {
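// xchg with a memory operand is implicitly locked. The old memory value is
// returned in the input register; the narrow cases then sign- or
// zero-extend it.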
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
case kAtomicExchangeUint8: {
__ xchg_b(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_b(i.InputRegister(0), i.InputRegister(0));
break;
}
case kAtomicExchangeInt16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movsx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
case kAtomicExchangeUint16: {
__ xchg_w(i.InputRegister(0), i.MemoryOperand(1));
__ movzx_w(i.InputRegister(0), i.InputRegister(0));
break;
}
case kAtomicExchangeWord32: {
__ xchg(i.InputRegister(0), i.MemoryOperand(1));
break;
}
case kAtomicCompareExchangeInt8: {
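// cmpxchg compares the memory operand against eax and leaves the old memory
// value in eax; the narrow cases then sign- or zero-extend it.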
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movsx_b(eax, eax);
break;
}
case kAtomicCompareExchangeUint8: {
__ lock();
__ cmpxchg_b(i.MemoryOperand(2), i.InputRegister(1));
__ movzx_b(eax, eax);
break;
}