| // Copyright (c) 1994-2006 Sun Microsystems Inc. |
| // All Rights Reserved. |
| // |
| // Redistribution and use in source and binary forms, with or without |
| // modification, are permitted provided that the following conditions |
| // are met: |
| // |
| // - Redistributions of source code must retain the above copyright notice, |
| // this list of conditions and the following disclaimer. |
| // |
| // - Redistribution in binary form must reproduce the above copyright |
| // notice, this list of conditions and the following disclaimer in the |
| // documentation and/or other materials provided with the |
| // distribution. |
| // |
| // - Neither the name of Sun Microsystems or the names of contributors may |
| // be used to endorse or promote products derived from this software without |
| // specific prior written permission. |
| // |
| // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS |
| // FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE |
| // COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, |
| // INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES |
| // (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
| // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) |
| // HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, |
| // STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) |
| // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED |
| // OF THE POSSIBILITY OF SUCH DAMAGE. |
| |
| // The original source code covered by the above license has been modified |
| // significantly by Google Inc. |
| // Copyright 2012 the V8 project authors. All rights reserved. |
| |
| #include "src/ia32/assembler-ia32.h" |
| |
| #include <cstring> |
| |
| #if V8_TARGET_ARCH_IA32 |
| |
| #if V8_LIBC_MSVCRT |
| #include <intrin.h> // _xgetbv() |
| #endif |
| #if V8_OS_MACOSX |
| #include <sys/sysctl.h> |
| #endif |
| |
| #include "src/assembler-inl.h" |
| #include "src/base/bits.h" |
| #include "src/base/cpu.h" |
| #include "src/code-stubs.h" |
| #include "src/conversions-inl.h" |
| #include "src/disassembler.h" |
| #include "src/macro-assembler.h" |
| #include "src/v8.h" |
| |
| namespace v8 { |
| namespace internal { |
| |
| Immediate Immediate::EmbeddedNumber(double value) { |
| int32_t smi; |
| if (DoubleToSmiInteger(value, &smi)) return Immediate(Smi::FromInt(smi)); |
| Immediate result(0, RelocInfo::EMBEDDED_OBJECT); |
| result.is_heap_object_request_ = true; |
| result.value_.heap_object_request = HeapObjectRequest(value); |
| return result; |
| } |
| |
| Immediate Immediate::EmbeddedCode(CodeStub* stub) { |
| Immediate result(0, RelocInfo::CODE_TARGET); |
| result.is_heap_object_request_ = true; |
| result.value_.heap_object_request = HeapObjectRequest(stub); |
| return result; |
| } |
| |
| // ----------------------------------------------------------------------------- |
| // Implementation of CpuFeatures |
| |
| namespace { |
| |
| #if !V8_LIBC_MSVCRT |
| |
| V8_INLINE uint64_t _xgetbv(unsigned int xcr) { |
| unsigned eax, edx; |
| // This uses a .byte sequence for xgetbv instead of the instruction mnemonic |
| // because older assemblers do not support xgetbv and there is no easy way |
| // to conditionally compile based on the assembler used. |
| __asm__ volatile(".byte 0x0F, 0x01, 0xD0" : "=a"(eax), "=d"(edx) : "c"(xcr)); |
| return static_cast<uint64_t>(eax) | (static_cast<uint64_t>(edx) << 32); |
| } |
| |
| #define _XCR_XFEATURE_ENABLED_MASK 0 |
| |
| #endif // !V8_LIBC_MSVCRT |
| |
| |
| bool OSHasAVXSupport() { |
| #if V8_OS_MACOSX |
| // Mac OS X up to 10.9 has a bug where AVX transitions could be triggered |
| // by ISRs, so we detect those kernels here and disable AVX in that case. |
| char buffer[128]; |
| size_t buffer_size = arraysize(buffer); |
| int ctl_name[] = {CTL_KERN, KERN_OSRELEASE}; |
| if (sysctl(ctl_name, 2, buffer, &buffer_size, nullptr, 0) != 0) { |
| V8_Fatal(__FILE__, __LINE__, "V8 failed to get kernel version"); |
| } |
| // The buffer now contains a string of the form XX.YY.ZZ, where |
| // XX is the major kernel version component. |
| char* period_pos = strchr(buffer, '.'); |
| DCHECK_NOT_NULL(period_pos); |
| *period_pos = '\0'; |
| long kernel_version_major = strtol(buffer, nullptr, 10); // NOLINT |
| if (kernel_version_major <= 13) return false; |
| #endif // V8_OS_MACOSX |
| // Check whether OS claims to support AVX. |
| uint64_t feature_mask = _xgetbv(_XCR_XFEATURE_ENABLED_MASK); |
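| // Bits 1 and 2 of XCR0 indicate that the OS has enabled saving of the SSE |
| // (XMM) and AVX (YMM) state, respectively; both must be set for AVX use. |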
| return (feature_mask & 0x6) == 0x6; |
| } |
| |
| } // namespace |
| |
| |
| void CpuFeatures::ProbeImpl(bool cross_compile) { |
| base::CPU cpu; |
| CHECK(cpu.has_sse2()); // SSE2 support is mandatory. |
| CHECK(cpu.has_cmov()); // CMOV support is mandatory. |
| |
| // Only use statically determined features for cross compile (snapshot). |
| if (cross_compile) return; |
| |
| if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1; |
| if (cpu.has_ssse3() && FLAG_enable_ssse3) supported_ |= 1u << SSSE3; |
| if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3; |
| if (cpu.has_avx() && FLAG_enable_avx && cpu.has_osxsave() && |
| OSHasAVXSupport()) { |
| supported_ |= 1u << AVX; |
| } |
| if (cpu.has_fma3() && FLAG_enable_fma3 && cpu.has_osxsave() && |
| OSHasAVXSupport()) { |
| supported_ |= 1u << FMA3; |
| } |
| if (cpu.has_bmi1() && FLAG_enable_bmi1) supported_ |= 1u << BMI1; |
| if (cpu.has_bmi2() && FLAG_enable_bmi2) supported_ |= 1u << BMI2; |
| if (cpu.has_lzcnt() && FLAG_enable_lzcnt) supported_ |= 1u << LZCNT; |
| if (cpu.has_popcnt() && FLAG_enable_popcnt) supported_ |= 1u << POPCNT; |
| if (strcmp(FLAG_mcpu, "auto") == 0) { |
| if (cpu.is_atom()) supported_ |= 1u << ATOM; |
| } else if (strcmp(FLAG_mcpu, "atom") == 0) { |
| supported_ |= 1u << ATOM; |
| } |
| } |
| |
| |
| void CpuFeatures::PrintTarget() { } |
| void CpuFeatures::PrintFeatures() { |
| printf( |
| "SSE3=%d SSSE3=%d SSE4_1=%d AVX=%d FMA3=%d BMI1=%d BMI2=%d LZCNT=%d " |
| "POPCNT=%d ATOM=%d\n", |
| CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSSE3), |
| CpuFeatures::IsSupported(SSE4_1), CpuFeatures::IsSupported(AVX), |
| CpuFeatures::IsSupported(FMA3), CpuFeatures::IsSupported(BMI1), |
| CpuFeatures::IsSupported(BMI2), CpuFeatures::IsSupported(LZCNT), |
| CpuFeatures::IsSupported(POPCNT), CpuFeatures::IsSupported(ATOM)); |
| } |
| |
| |
| // ----------------------------------------------------------------------------- |
| // Implementation of Displacement |
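| // |
| // A Displacement packs the position of the next instruction in a label's |
| // linked list together with the type of the displacement, so that an |
| // unbound label can thread a singly-linked list through the code buffer. |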
| |
| void Displacement::init(Label* L, Type type) { |
| DCHECK(!L->is_bound()); |
| int next = 0; |
| if (L->is_linked()) { |
| next = L->pos(); |
| DCHECK_GT(next, 0); // Displacements must be at positions > 0 |
| } |
| // Ensure that we _never_ overflow the next field. |
| DCHECK(NextField::is_valid(Assembler::kMaximalBufferSize)); |
| data_ = NextField::encode(next) | TypeField::encode(type); |
| } |
| |
| |
| // ----------------------------------------------------------------------------- |
| // Implementation of RelocInfo |
| |
| const int RelocInfo::kApplyMask = |
| RelocInfo::kCodeTargetMask | 1 << RelocInfo::RUNTIME_ENTRY | |
| 1 << RelocInfo::INTERNAL_REFERENCE | 1 << RelocInfo::JS_TO_WASM_CALL; |
| |
| bool RelocInfo::IsCodedSpecially() { |
| // The deserializer needs to know whether a pointer is specially coded. Being |
| // specially coded on IA32 means that it is a relative address, as used by |
| // branch instructions. These are also the ones that need changing when a |
| // code object moves. |
| return (1 << rmode_) & kApplyMask; |
| } |
| |
| |
| bool RelocInfo::IsInConstantPool() { |
| return false; |
| } |
| |
| Address RelocInfo::embedded_address() const { return Memory::Address_at(pc_); } |
| |
| uint32_t RelocInfo::embedded_size() const { return Memory::uint32_at(pc_); } |
| |
| void RelocInfo::set_embedded_address(Isolate* isolate, Address address, |
| ICacheFlushMode icache_flush_mode) { |
| Memory::Address_at(pc_) = address; |
| if (icache_flush_mode != SKIP_ICACHE_FLUSH) { |
| Assembler::FlushICache(isolate, pc_, sizeof(Address)); |
| } |
| } |
| |
| void RelocInfo::set_embedded_size(Isolate* isolate, uint32_t size, |
| ICacheFlushMode icache_flush_mode) { |
| Memory::uint32_at(pc_) = size; |
| if (icache_flush_mode != SKIP_ICACHE_FLUSH) { |
| Assembler::FlushICache(isolate, pc_, sizeof(uint32_t)); |
| } |
| } |
| |
| void RelocInfo::set_js_to_wasm_address(Isolate* isolate, Address address, |
| ICacheFlushMode icache_flush_mode) { |
| DCHECK_EQ(rmode_, JS_TO_WASM_CALL); |
| Assembler::set_target_address_at(isolate, pc_, constant_pool_, address, |
| icache_flush_mode); |
| } |
| |
| Address RelocInfo::js_to_wasm_address() const { |
| DCHECK_EQ(rmode_, JS_TO_WASM_CALL); |
| return Assembler::target_address_at(pc_, constant_pool_); |
| } |
| |
| // ----------------------------------------------------------------------------- |
| // Implementation of Operand |
| |
| Operand::Operand(Register base, int32_t disp, RelocInfo::Mode rmode) { |
| // [base + disp/r] |
| if (disp == 0 && RelocInfo::IsNone(rmode) && base != ebp) { |
| // [base] |
| set_modrm(0, base); |
| if (base == esp) set_sib(times_1, esp, base); |
| } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) { |
| // [base + disp8] |
| set_modrm(1, base); |
| if (base == esp) set_sib(times_1, esp, base); |
| set_disp8(disp); |
| } else { |
| // [base + disp/r] |
| set_modrm(2, base); |
| if (base == esp) set_sib(times_1, esp, base); |
| set_dispr(disp, rmode); |
| } |
| } |
| |
| |
| Operand::Operand(Register base, |
| Register index, |
| ScaleFactor scale, |
| int32_t disp, |
| RelocInfo::Mode rmode) { |
| DCHECK(index != esp); // illegal addressing mode |
| // [base + index*scale + disp/r] |
| if (disp == 0 && RelocInfo::IsNone(rmode) && base != ebp) { |
| // [base + index*scale] |
| set_modrm(0, esp); |
| set_sib(scale, index, base); |
| } else if (is_int8(disp) && RelocInfo::IsNone(rmode)) { |
| // [base + index*scale + disp8] |
| set_modrm(1, esp); |
| set_sib(scale, index, base); |
| set_disp8(disp); |
| } else { |
| // [base + index*scale + disp/r] |
| set_modrm(2, esp); |
| set_sib(scale, index, base); |
| set_dispr(disp, rmode); |
| } |
| } |
| |
| |
| Operand::Operand(Register index, |
| ScaleFactor scale, |
| int32_t disp, |
| RelocInfo::Mode rmode) { |
| DCHECK(index != esp); // illegal addressing mode |
| // [index*scale + disp/r] |
| set_modrm(0, esp); |
| set_sib(scale, index, ebp); |
| set_dispr(disp, rmode); |
| } |
| |
| |
| bool Operand::is_reg_only() const { |
| return (buf_[0] & 0xF8) == 0xC0; // Addressing mode is register only. |
| } |
| |
| |
| Register Operand::reg() const { |
| DCHECK(is_reg_only()); |
| return Register::from_code(buf_[0] & 0x07); |
| } |
| |
| void Assembler::AllocateAndInstallRequestedHeapObjects(Isolate* isolate) { |
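| // Patch the placeholder immediates recorded during assembly with the actual |
| // heap objects (heap numbers and code stub code) now that an isolate is |
| // available. |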
| for (auto& request : heap_object_requests_) { |
| Handle<HeapObject> object; |
| switch (request.kind()) { |
| case HeapObjectRequest::kHeapNumber: |
| object = isolate->factory()->NewHeapNumber(request.heap_number(), |
| IMMUTABLE, TENURED); |
| break; |
| case HeapObjectRequest::kCodeStub: |
| request.code_stub()->set_isolate(isolate); |
| object = request.code_stub()->GetCode(); |
| break; |
| } |
| Address pc = buffer_ + request.offset(); |
| Memory::Object_Handle_at(pc) = object; |
| } |
| } |
| |
| // ----------------------------------------------------------------------------- |
| // Implementation of Assembler. |
| |
| // Emit a single byte. Must always be inlined. |
| #define EMIT(x) \ |
| *pc_++ = (x) |
| |
| Assembler::Assembler(IsolateData isolate_data, void* buffer, int buffer_size) |
| : AssemblerBase(isolate_data, buffer, buffer_size) { |
| // Clear the buffer in debug mode unless it was provided by the |
| // caller, in which case we can't be sure it's okay to overwrite |
| // existing code in it. |
| #ifdef DEBUG |
| if (own_buffer_) { |
| memset(buffer_, 0xCC, buffer_size_); // int3 |
| } |
| #endif |
| |
| reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_); |
| } |
| |
| void Assembler::GetCode(Isolate* isolate, CodeDesc* desc) { |
| // Finalize code (at this point overflow() may be true, but the gap ensures |
| // that we are still not overlapping instructions and relocation info). |
| DCHECK(pc_ <= reloc_info_writer.pos()); // No overlap. |
| |
| AllocateAndInstallRequestedHeapObjects(isolate); |
| |
| // Set up code descriptor. |
| desc->buffer = buffer_; |
| desc->buffer_size = buffer_size_; |
| desc->instr_size = pc_offset(); |
| desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos(); |
| desc->origin = this; |
| desc->constant_pool_size = 0; |
| desc->unwinding_info_size = 0; |
| desc->unwinding_info = nullptr; |
| |
| // Collection stage: record which far jumps have a 32-bit displacement that |
| // would fit in 8 bits, so a later assembly pass can emit them as short jumps. |
| auto jump_opt = jump_optimization_info(); |
| if (jump_opt && jump_opt->is_collecting()) { |
| auto& bitmap = jump_opt->farjmp_bitmap(); |
| int num = static_cast<int>(farjmp_positions_.size()); |
| if (num && bitmap.empty()) { |
| bool can_opt = false; |
| |
| bitmap.resize((num + 31) / 32, 0); |
| for (int i = 0; i < num; i++) { |
| int disp_pos = farjmp_positions_[i]; |
| int disp = long_at(disp_pos); |
| if (is_int8(disp)) { |
| bitmap[i / 32] |= 1 << (i & 31); |
| can_opt = true; |
| } |
| } |
| if (can_opt) { |
| jump_opt->set_optimizable(); |
| } |
| } |
| } |
| } |
| |
| |
| void Assembler::Align(int m) { |
| DCHECK(base::bits::IsPowerOfTwo(m)); |
| int mask = m - 1; |
| int addr = pc_offset(); |
| Nop((m - (addr & mask)) & mask); |
| } |
| |
| |
| bool Assembler::IsNop(Address addr) { |
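| // Skip any 0x66 operand-size prefixes; 0x90 is the one-byte nop and |
| // 0x0F 0x1F is the multi-byte nop opcode (see Nop() below). |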
| Address a = addr; |
| while (*a == 0x66) a++; |
| if (*a == 0x90) return true; |
| if (a[0] == 0xF && a[1] == 0x1F) return true; |
| return false; |
| } |
| |
| |
| void Assembler::Nop(int bytes) { |
| EnsureSpace ensure_space(this); |
| |
| // Multi byte nops from http://support.amd.com/us/Processor_TechDocs/40546.pdf |
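| // Several cases below intentionally fall through: e.g. case 2 emits a 0x66 |
| // operand-size prefix and then falls into case 1 to emit the 0x90 nop. |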
| while (bytes > 0) { |
| switch (bytes) { |
| case 2: |
| EMIT(0x66); |
| case 1: |
| EMIT(0x90); |
| return; |
| case 3: |
| EMIT(0xF); |
| EMIT(0x1F); |
| EMIT(0); |
| return; |
| case 4: |
| EMIT(0xF); |
| EMIT(0x1F); |
| EMIT(0x40); |
| EMIT(0); |
| return; |
| case 6: |
| EMIT(0x66); |
| case 5: |
| EMIT(0xF); |
| EMIT(0x1F); |
| EMIT(0x44); |
| EMIT(0); |
| EMIT(0); |
| return; |
| case 7: |
| EMIT(0xF); |
| EMIT(0x1F); |
| EMIT(0x80); |
| EMIT(0); |
| EMIT(0); |
| EMIT(0); |
| EMIT(0); |
| return; |
| default: |
| case 11: |
| EMIT(0x66); |
| bytes--; |
| case 10: |
| EMIT(0x66); |
| bytes--; |
| case 9: |
| EMIT(0x66); |
| bytes--; |
| case 8: |
| EMIT(0xF); |
| EMIT(0x1F); |
| EMIT(0x84); |
| EMIT(0); |
| EMIT(0); |
| EMIT(0); |
| EMIT(0); |
| EMIT(0); |
| bytes -= 8; |
| } |
| } |
| } |
| |
| |
| void Assembler::CodeTargetAlign() { |
| Align(16); // Preferred alignment of jump targets on ia32. |
| } |
| |
| |
| void Assembler::cpuid() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xA2); |
| } |
| |
| |
| void Assembler::pushad() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x60); |
| } |
| |
| |
| void Assembler::popad() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x61); |
| } |
| |
| |
| void Assembler::pushfd() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x9C); |
| } |
| |
| |
| void Assembler::popfd() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x9D); |
| } |
| |
| |
| void Assembler::push(const Immediate& x) { |
| EnsureSpace ensure_space(this); |
| if (x.is_int8()) { |
| EMIT(0x6A); |
| EMIT(x.immediate()); |
| } else { |
| EMIT(0x68); |
| emit(x); |
| } |
| } |
| |
| |
| void Assembler::push_imm32(int32_t imm32) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x68); |
| emit(imm32); |
| } |
| |
| |
| void Assembler::push(Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x50 | src.code()); |
| } |
| |
| |
| void Assembler::push(const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xFF); |
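| // The register passed to emit_operand() is not a real operand here: its code |
| // (esi == 6) supplies the /6 opcode extension of 'push r/m32'. The same |
| // pattern is used for the other group-opcode instructions below. |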
| emit_operand(esi, src); |
| } |
| |
| |
| void Assembler::pop(Register dst) { |
| DCHECK_NOT_NULL(reloc_info_writer.last_pc()); |
| EnsureSpace ensure_space(this); |
| EMIT(0x58 | dst.code()); |
| } |
| |
| |
| void Assembler::pop(const Operand& dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x8F); |
| emit_operand(eax, dst); |
| } |
| |
| |
| void Assembler::enter(const Immediate& size) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xC8); |
| emit_w(size); |
| EMIT(0); |
| } |
| |
| |
| void Assembler::leave() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xC9); |
| } |
| |
| |
| void Assembler::mov_b(Register dst, const Operand& src) { |
| CHECK(dst.is_byte_register()); |
| EnsureSpace ensure_space(this); |
| EMIT(0x8A); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::mov_b(const Operand& dst, const Immediate& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xC6); |
| emit_operand(eax, dst); |
| EMIT(static_cast<int8_t>(src.immediate())); |
| } |
| |
| |
| void Assembler::mov_b(const Operand& dst, Register src) { |
| CHECK(src.is_byte_register()); |
| EnsureSpace ensure_space(this); |
| EMIT(0x88); |
| emit_operand(src, dst); |
| } |
| |
| |
| void Assembler::mov_w(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x8B); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::mov_w(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x89); |
| emit_operand(src, dst); |
| } |
| |
| |
| void Assembler::mov_w(const Operand& dst, const Immediate& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0xC7); |
| emit_operand(eax, dst); |
| EMIT(static_cast<int8_t>(src.immediate() & 0xFF)); |
| EMIT(static_cast<int8_t>(src.immediate() >> 8)); |
| } |
| |
| |
| void Assembler::mov(Register dst, int32_t imm32) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xB8 | dst.code()); |
| emit(imm32); |
| } |
| |
| |
| void Assembler::mov(Register dst, const Immediate& x) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xB8 | dst.code()); |
| emit(x); |
| } |
| |
| void Assembler::mov(Register dst, Handle<HeapObject> handle) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xB8 | dst.code()); |
| emit(handle); |
| } |
| |
| |
| void Assembler::mov(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x8B); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::mov(Register dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x89); |
| EMIT(0xC0 | src.code() << 3 | dst.code()); |
| } |
| |
| |
| void Assembler::mov(const Operand& dst, const Immediate& x) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xC7); |
| emit_operand(eax, dst); |
| emit(x); |
| } |
| |
| void Assembler::mov(const Operand& dst, Handle<HeapObject> handle) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xC7); |
| emit_operand(eax, dst); |
| emit(handle); |
| } |
| |
| |
| void Assembler::mov(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x89); |
| emit_operand(src, dst); |
| } |
| |
| |
| void Assembler::movsx_b(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xBE); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::movsx_w(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xBF); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::movzx_b(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xB6); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::movzx_w(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xB7); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::cmov(Condition cc, Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| // Opcode: 0f 40 + cc /r. |
| EMIT(0x0F); |
| EMIT(0x40 + cc); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::cld() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xFC); |
| } |
| |
| |
| void Assembler::rep_movs() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0xA5); |
| } |
| |
| |
| void Assembler::rep_stos() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0xAB); |
| } |
| |
| |
| void Assembler::stos() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xAB); |
| } |
| |
| |
| void Assembler::xchg(Register dst, Register src) { |
| EnsureSpace ensure_space(this); |
| if (src == eax || dst == eax) { // Single-byte encoding. |
| EMIT(0x90 | (src == eax ? dst.code() : src.code())); |
| } else { |
| EMIT(0x87); |
| EMIT(0xC0 | src.code() << 3 | dst.code()); |
| } |
| } |
| |
| |
| void Assembler::xchg(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x87); |
| emit_operand(dst, src); |
| } |
| |
| void Assembler::xchg_b(Register reg, const Operand& op) { |
| DCHECK(reg.is_byte_register()); |
| EnsureSpace ensure_space(this); |
| EMIT(0x86); |
| emit_operand(reg, op); |
| } |
| |
| void Assembler::xchg_w(Register reg, const Operand& op) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x87); |
| emit_operand(reg, op); |
| } |
| |
| void Assembler::lock() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF0); |
| } |
| |
| void Assembler::cmpxchg(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xB1); |
| emit_operand(src, dst); |
| } |
| |
| void Assembler::cmpxchg_b(const Operand& dst, Register src) { |
| DCHECK(src.is_byte_register()); |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xB0); |
| emit_operand(src, dst); |
| } |
| |
| void Assembler::cmpxchg_w(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0xB1); |
| emit_operand(src, dst); |
| } |
| |
| void Assembler::lfence() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xAE); |
| EMIT(0xE8); |
| } |
| |
| void Assembler::adc(Register dst, int32_t imm32) { |
| EnsureSpace ensure_space(this); |
| emit_arith(2, Operand(dst), Immediate(imm32)); |
| } |
| |
| |
| void Assembler::adc(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x13); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::add(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x03); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::add(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x01); |
| emit_operand(src, dst); |
| } |
| |
| |
| void Assembler::add(const Operand& dst, const Immediate& x) { |
| DCHECK_NOT_NULL(reloc_info_writer.last_pc()); |
| EnsureSpace ensure_space(this); |
| emit_arith(0, dst, x); |
| } |
| |
| |
| void Assembler::and_(Register dst, int32_t imm32) { |
| and_(dst, Immediate(imm32)); |
| } |
| |
| |
| void Assembler::and_(Register dst, const Immediate& x) { |
| EnsureSpace ensure_space(this); |
| emit_arith(4, Operand(dst), x); |
| } |
| |
| |
| void Assembler::and_(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x23); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::and_(const Operand& dst, const Immediate& x) { |
| EnsureSpace ensure_space(this); |
| emit_arith(4, dst, x); |
| } |
| |
| |
| void Assembler::and_(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x21); |
| emit_operand(src, dst); |
| } |
| |
| void Assembler::cmpb(const Operand& op, Immediate imm8) { |
| DCHECK(imm8.is_int8() || imm8.is_uint8()); |
| EnsureSpace ensure_space(this); |
| if (op.is_reg(eax)) { |
| EMIT(0x3C); |
| } else { |
| EMIT(0x80); |
| emit_operand(edi, op); // edi == 7 |
| } |
| emit_b(imm8); |
| } |
| |
| |
| void Assembler::cmpb(const Operand& op, Register reg) { |
| CHECK(reg.is_byte_register()); |
| EnsureSpace ensure_space(this); |
| EMIT(0x38); |
| emit_operand(reg, op); |
| } |
| |
| |
| void Assembler::cmpb(Register reg, const Operand& op) { |
| CHECK(reg.is_byte_register()); |
| EnsureSpace ensure_space(this); |
| EMIT(0x3A); |
| emit_operand(reg, op); |
| } |
| |
| |
| void Assembler::cmpw(const Operand& op, Immediate imm16) { |
| DCHECK(imm16.is_int16() || imm16.is_uint16()); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x81); |
| emit_operand(edi, op); |
| emit_w(imm16); |
| } |
| |
| void Assembler::cmpw(Register reg, const Operand& op) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x3B); |
| emit_operand(reg, op); |
| } |
| |
| void Assembler::cmpw(const Operand& op, Register reg) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x39); |
| emit_operand(reg, op); |
| } |
| |
| void Assembler::cmp(Register reg, int32_t imm32) { |
| EnsureSpace ensure_space(this); |
| emit_arith(7, Operand(reg), Immediate(imm32)); |
| } |
| |
| void Assembler::cmp(Register reg, Handle<HeapObject> handle) { |
| EnsureSpace ensure_space(this); |
| emit_arith(7, Operand(reg), Immediate(handle)); |
| } |
| |
| |
| void Assembler::cmp(Register reg, const Operand& op) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x3B); |
| emit_operand(reg, op); |
| } |
| |
| void Assembler::cmp(const Operand& op, Register reg) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x39); |
| emit_operand(reg, op); |
| } |
| |
| void Assembler::cmp(const Operand& op, const Immediate& imm) { |
| EnsureSpace ensure_space(this); |
| emit_arith(7, op, imm); |
| } |
| |
| void Assembler::cmp(const Operand& op, Handle<HeapObject> handle) { |
| EnsureSpace ensure_space(this); |
| emit_arith(7, op, Immediate(handle)); |
| } |
| |
| |
| void Assembler::cmpb_al(const Operand& op) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x38); // CMP r/m8, r8 |
| emit_operand(eax, op); // eax has same code as register al. |
| } |
| |
| |
| void Assembler::cmpw_ax(const Operand& op) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x39); // CMP r/m16, r16 |
| emit_operand(eax, op); // eax has same code as register ax. |
| } |
| |
| |
| void Assembler::dec_b(Register dst) { |
| CHECK(dst.is_byte_register()); |
| EnsureSpace ensure_space(this); |
| EMIT(0xFE); |
| EMIT(0xC8 | dst.code()); |
| } |
| |
| |
| void Assembler::dec_b(const Operand& dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xFE); |
| emit_operand(ecx, dst); |
| } |
| |
| |
| void Assembler::dec(Register dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x48 | dst.code()); |
| } |
| |
| |
| void Assembler::dec(const Operand& dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xFF); |
| emit_operand(ecx, dst); |
| } |
| |
| |
| void Assembler::cdq() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x99); |
| } |
| |
| |
| void Assembler::idiv(const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF7); |
| emit_operand(edi, src); |
| } |
| |
| |
| void Assembler::div(const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF7); |
| emit_operand(esi, src); |
| } |
| |
| |
| void Assembler::imul(Register reg) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF7); |
| EMIT(0xE8 | reg.code()); |
| } |
| |
| |
| void Assembler::imul(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xAF); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::imul(Register dst, Register src, int32_t imm32) { |
| imul(dst, Operand(src), imm32); |
| } |
| |
| |
| void Assembler::imul(Register dst, const Operand& src, int32_t imm32) { |
| EnsureSpace ensure_space(this); |
| if (is_int8(imm32)) { |
| EMIT(0x6B); |
| emit_operand(dst, src); |
| EMIT(imm32); |
| } else { |
| EMIT(0x69); |
| emit_operand(dst, src); |
| emit(imm32); |
| } |
| } |
| |
| |
| void Assembler::inc(Register dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x40 | dst.code()); |
| } |
| |
| |
| void Assembler::inc(const Operand& dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xFF); |
| emit_operand(eax, dst); |
| } |
| |
| |
| void Assembler::lea(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x8D); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::mul(Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF7); |
| EMIT(0xE0 | src.code()); |
| } |
| |
| |
| void Assembler::neg(Register dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF7); |
| EMIT(0xD8 | dst.code()); |
| } |
| |
| |
| void Assembler::neg(const Operand& dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF7); |
| emit_operand(ebx, dst); |
| } |
| |
| |
| void Assembler::not_(Register dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF7); |
| EMIT(0xD0 | dst.code()); |
| } |
| |
| |
| void Assembler::not_(const Operand& dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF7); |
| emit_operand(edx, dst); |
| } |
| |
| |
| void Assembler::or_(Register dst, int32_t imm32) { |
| EnsureSpace ensure_space(this); |
| emit_arith(1, Operand(dst), Immediate(imm32)); |
| } |
| |
| |
| void Assembler::or_(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0B); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::or_(const Operand& dst, const Immediate& x) { |
| EnsureSpace ensure_space(this); |
| emit_arith(1, dst, x); |
| } |
| |
| |
| void Assembler::or_(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x09); |
| emit_operand(src, dst); |
| } |
| |
| |
| void Assembler::rcl(Register dst, uint8_t imm8) { |
| EnsureSpace ensure_space(this); |
| DCHECK(is_uint5(imm8)); // illegal shift count |
| if (imm8 == 1) { |
| EMIT(0xD1); |
| EMIT(0xD0 | dst.code()); |
| } else { |
| EMIT(0xC1); |
| EMIT(0xD0 | dst.code()); |
| EMIT(imm8); |
| } |
| } |
| |
| |
| void Assembler::rcr(Register dst, uint8_t imm8) { |
| EnsureSpace ensure_space(this); |
| DCHECK(is_uint5(imm8)); // illegal shift count |
| if (imm8 == 1) { |
| EMIT(0xD1); |
| EMIT(0xD8 | dst.code()); |
| } else { |
| EMIT(0xC1); |
| EMIT(0xD8 | dst.code()); |
| EMIT(imm8); |
| } |
| } |
| |
| |
| void Assembler::ror(const Operand& dst, uint8_t imm8) { |
| EnsureSpace ensure_space(this); |
| DCHECK(is_uint5(imm8)); // illegal shift count |
| if (imm8 == 1) { |
| EMIT(0xD1); |
| emit_operand(ecx, dst); |
| } else { |
| EMIT(0xC1); |
| emit_operand(ecx, dst); |
| EMIT(imm8); |
| } |
| } |
| |
| |
| void Assembler::ror_cl(const Operand& dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD3); |
| emit_operand(ecx, dst); |
| } |
| |
| |
| void Assembler::sar(const Operand& dst, uint8_t imm8) { |
| EnsureSpace ensure_space(this); |
| DCHECK(is_uint5(imm8)); // illegal shift count |
| if (imm8 == 1) { |
| EMIT(0xD1); |
| emit_operand(edi, dst); |
| } else { |
| EMIT(0xC1); |
| emit_operand(edi, dst); |
| EMIT(imm8); |
| } |
| } |
| |
| |
| void Assembler::sar_cl(const Operand& dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD3); |
| emit_operand(edi, dst); |
| } |
| |
| void Assembler::sbb(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x1B); |
| emit_operand(dst, src); |
| } |
| |
| void Assembler::shld(Register dst, Register src, uint8_t shift) { |
| DCHECK(is_uint5(shift)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xA4); |
| emit_operand(src, Operand(dst)); |
| EMIT(shift); |
| } |
| |
| void Assembler::shld_cl(Register dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xA5); |
| emit_operand(src, Operand(dst)); |
| } |
| |
| |
| void Assembler::shl(const Operand& dst, uint8_t imm8) { |
| EnsureSpace ensure_space(this); |
| DCHECK(is_uint5(imm8)); // illegal shift count |
| if (imm8 == 1) { |
| EMIT(0xD1); |
| emit_operand(esp, dst); |
| } else { |
| EMIT(0xC1); |
| emit_operand(esp, dst); |
| EMIT(imm8); |
| } |
| } |
| |
| |
| void Assembler::shl_cl(const Operand& dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD3); |
| emit_operand(esp, dst); |
| } |
| |
| void Assembler::shr(const Operand& dst, uint8_t imm8) { |
| EnsureSpace ensure_space(this); |
| DCHECK(is_uint5(imm8)); // illegal shift count |
| if (imm8 == 1) { |
| EMIT(0xD1); |
| emit_operand(ebp, dst); |
| } else { |
| EMIT(0xC1); |
| emit_operand(ebp, dst); |
| EMIT(imm8); |
| } |
| } |
| |
| |
| void Assembler::shr_cl(const Operand& dst) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD3); |
| emit_operand(ebp, dst); |
| } |
| |
| void Assembler::shrd(Register dst, Register src, uint8_t shift) { |
| DCHECK(is_uint5(shift)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xAC); |
| emit_operand(dst, Operand(src)); |
| EMIT(shift); |
| } |
| |
| void Assembler::shrd_cl(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xAD); |
| emit_operand(src, dst); |
| } |
| |
| void Assembler::sub(const Operand& dst, const Immediate& x) { |
| EnsureSpace ensure_space(this); |
| emit_arith(5, dst, x); |
| } |
| |
| |
| void Assembler::sub(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x2B); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::sub(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x29); |
| emit_operand(src, dst); |
| } |
| |
| |
| void Assembler::test(Register reg, const Immediate& imm) { |
| if (imm.is_uint8()) { |
| test_b(reg, imm); |
| return; |
| } |
| |
| EnsureSpace ensure_space(this); |
| // This is not using emit_arith because test doesn't support |
| // sign-extension of 8-bit operands. |
| if (reg == eax) { |
| EMIT(0xA9); |
| } else { |
| EMIT(0xF7); |
| EMIT(0xC0 | reg.code()); |
| } |
| emit(imm); |
| } |
| |
| |
| void Assembler::test(Register reg, const Operand& op) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x85); |
| emit_operand(reg, op); |
| } |
| |
| |
| void Assembler::test_b(Register reg, const Operand& op) { |
| CHECK(reg.is_byte_register()); |
| EnsureSpace ensure_space(this); |
| EMIT(0x84); |
| emit_operand(reg, op); |
| } |
| |
| |
| void Assembler::test(const Operand& op, const Immediate& imm) { |
| if (op.is_reg_only()) { |
| test(op.reg(), imm); |
| return; |
| } |
| if (imm.is_uint8()) { |
| return test_b(op, imm); |
| } |
| EnsureSpace ensure_space(this); |
| EMIT(0xF7); |
| emit_operand(eax, op); |
| emit(imm); |
| } |
| |
| void Assembler::test_b(Register reg, Immediate imm8) { |
| DCHECK(imm8.is_uint8()); |
| EnsureSpace ensure_space(this); |
| // Only use test against byte for registers that have a byte |
| // variant: eax, ebx, ecx, and edx. |
| if (reg == eax) { |
| EMIT(0xA8); |
| emit_b(imm8); |
| } else if (reg.is_byte_register()) { |
| emit_arith_b(0xF6, 0xC0, reg, static_cast<uint8_t>(imm8.immediate())); |
| } else { |
| EMIT(0x66); |
| EMIT(0xF7); |
| EMIT(0xC0 | reg.code()); |
| emit_w(imm8); |
| } |
| } |
| |
| void Assembler::test_b(const Operand& op, Immediate imm8) { |
| if (op.is_reg_only()) { |
| test_b(op.reg(), imm8); |
| return; |
| } |
| EnsureSpace ensure_space(this); |
| EMIT(0xF6); |
| emit_operand(eax, op); |
| emit_b(imm8); |
| } |
| |
| void Assembler::test_w(Register reg, Immediate imm16) { |
| DCHECK(imm16.is_int16() || imm16.is_uint16()); |
| EnsureSpace ensure_space(this); |
| if (reg == eax) { |
| EMIT(0xA9); |
| emit_w(imm16); |
| } else { |
| EMIT(0x66); |
| EMIT(0xF7); |
| EMIT(0xC0 | reg.code()); |
| emit_w(imm16); |
| } |
| } |
| |
| void Assembler::test_w(Register reg, const Operand& op) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x85); |
| emit_operand(reg, op); |
| } |
| |
| void Assembler::test_w(const Operand& op, Immediate imm16) { |
| DCHECK(imm16.is_int16() || imm16.is_uint16()); |
| if (op.is_reg_only()) { |
| test_w(op.reg(), imm16); |
| return; |
| } |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0xF7); |
| emit_operand(eax, op); |
| emit_w(imm16); |
| } |
| |
| void Assembler::xor_(Register dst, int32_t imm32) { |
| EnsureSpace ensure_space(this); |
| emit_arith(6, Operand(dst), Immediate(imm32)); |
| } |
| |
| |
| void Assembler::xor_(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x33); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::xor_(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x31); |
| emit_operand(src, dst); |
| } |
| |
| |
| void Assembler::xor_(const Operand& dst, const Immediate& x) { |
| EnsureSpace ensure_space(this); |
| emit_arith(6, dst, x); |
| } |
| |
| |
| void Assembler::bt(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xA3); |
| emit_operand(src, dst); |
| } |
| |
| |
| void Assembler::bts(const Operand& dst, Register src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xAB); |
| emit_operand(src, dst); |
| } |
| |
| |
| void Assembler::bsr(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xBD); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::bsf(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xBC); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::hlt() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF4); |
| } |
| |
| |
| void Assembler::int3() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xCC); |
| } |
| |
| |
| void Assembler::nop() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x90); |
| } |
| |
| |
| void Assembler::ret(int imm16) { |
| EnsureSpace ensure_space(this); |
| DCHECK(is_uint16(imm16)); |
| if (imm16 == 0) { |
| EMIT(0xC3); |
| } else { |
| EMIT(0xC2); |
| EMIT(imm16 & 0xFF); |
| EMIT((imm16 >> 8) & 0xFF); |
| } |
| } |
| |
| |
| void Assembler::ud2() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x0B); |
| } |
| |
| |
| // Labels refer to positions in the (to be) generated code. |
| // There are bound, linked, and unused labels. |
| // |
| // Bound labels refer to known positions in the already |
| // generated code. pos() is the position the label refers to. |
| // |
| // Linked labels refer to unknown positions in the code |
| // to be generated; pos() is the position of the 32-bit |
| // Displacement of the last instruction using the label. |
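| // |
| // A typical use (illustration only): |
| // |
| //   Label done; |
| //   assm.j(not_zero, &done);  // forward reference: 'done' becomes linked |
| //   ... |
| //   assm.bind(&done);         // patches every linked displacement |
| //   assm.jmp(&done);          // backward reference: 'done' is now bound |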
| |
| void Assembler::print(const Label* L) { |
| if (L->is_unused()) { |
| PrintF("unused label\n"); |
| } else if (L->is_bound()) { |
| PrintF("bound label to %d\n", L->pos()); |
| } else if (L->is_linked()) { |
| Label l; |
| l.link_to(L->pos()); |
| PrintF("unbound label"); |
| while (l.is_linked()) { |
| Displacement disp = disp_at(&l); |
| PrintF("@ %d ", l.pos()); |
| disp.print(); |
| PrintF("\n"); |
| disp.next(&l); |
| } |
| } else { |
| PrintF("label in inconsistent state (pos = %d)\n", L->pos_); |
| } |
| } |
| |
| |
| void Assembler::bind_to(Label* L, int pos) { |
| EnsureSpace ensure_space(this); |
| DCHECK(0 <= pos && pos <= pc_offset()); // must have a valid binding position |
| while (L->is_linked()) { |
| Displacement disp = disp_at(L); |
| int fixup_pos = L->pos(); |
| if (disp.type() == Displacement::CODE_ABSOLUTE) { |
| long_at_put(fixup_pos, reinterpret_cast<int>(buffer_ + pos)); |
| internal_reference_positions_.push_back(fixup_pos); |
| } else if (disp.type() == Displacement::CODE_RELATIVE) { |
| // Relative to Code* heap object pointer. |
| long_at_put(fixup_pos, pos + Code::kHeaderSize - kHeapObjectTag); |
| } else { |
| if (disp.type() == Displacement::UNCONDITIONAL_JUMP) { |
| DCHECK_EQ(byte_at(fixup_pos - 1), 0xE9); // jmp expected |
| } |
| // Relative address, relative to point after address. |
| int imm32 = pos - (fixup_pos + sizeof(int32_t)); |
| long_at_put(fixup_pos, imm32); |
| } |
| disp.next(L); |
| } |
| while (L->is_near_linked()) { |
| int fixup_pos = L->near_link_pos(); |
| int offset_to_next = |
| static_cast<int>(*reinterpret_cast<int8_t*>(addr_at(fixup_pos))); |
| DCHECK_LE(offset_to_next, 0); |
| // Relative address, relative to point after address. |
| int disp = pos - fixup_pos - sizeof(int8_t); |
| CHECK(0 <= disp && disp <= 127); |
| set_byte_at(fixup_pos, disp); |
| if (offset_to_next < 0) { |
| L->link_to(fixup_pos + offset_to_next, Label::kNear); |
| } else { |
| L->UnuseNear(); |
| } |
| } |
| |
| // Optimization stage: now that the label's position is known, patch the |
| // 8-bit displacements of the far jumps that were emitted as short jumps. |
| auto jump_opt = jump_optimization_info(); |
| if (jump_opt && jump_opt->is_optimizing()) { |
| auto it = label_farjmp_maps_.find(L); |
| if (it != label_farjmp_maps_.end()) { |
| auto& pos_vector = it->second; |
| for (auto fixup_pos : pos_vector) { |
| int disp = pos - (fixup_pos + sizeof(int8_t)); |
| CHECK(is_int8(disp)); |
| set_byte_at(fixup_pos, disp); |
| } |
| label_farjmp_maps_.erase(it); |
| } |
| } |
| L->bind_to(pos); |
| } |
| |
| |
| void Assembler::bind(Label* L) { |
| EnsureSpace ensure_space(this); |
| DCHECK(!L->is_bound()); // label can only be bound once |
| bind_to(L, pc_offset()); |
| } |
| |
| void Assembler::record_farjmp_position(Label* L, int pos) { |
| auto& pos_vector = label_farjmp_maps_[L]; |
| pos_vector.push_back(pos); |
| } |
| |
| bool Assembler::is_optimizable_farjmp(int idx) { |
| if (predictable_code_size()) return false; |
| |
| auto jump_opt = jump_optimization_info(); |
| CHECK(jump_opt->is_optimizing()); |
| |
| auto& bitmap = jump_opt->farjmp_bitmap(); |
| CHECK(idx < static_cast<int>(bitmap.size() * 32)); |
| return !!(bitmap[idx / 32] & (1 << (idx & 31))); |
| } |
| |
| void Assembler::call(Label* L) { |
| EnsureSpace ensure_space(this); |
| if (L->is_bound()) { |
| const int long_size = 5; |
| int offs = L->pos() - pc_offset(); |
| DCHECK_LE(offs, 0); |
| // 1110 1000 #32-bit disp. |
| EMIT(0xE8); |
| emit(offs - long_size); |
| } else { |
| // 1110 1000 #32-bit disp. |
| EMIT(0xE8); |
| emit_disp(L, Displacement::OTHER); |
| } |
| } |
| |
| |
| void Assembler::call(byte* entry, RelocInfo::Mode rmode) { |
| EnsureSpace ensure_space(this); |
| DCHECK(!RelocInfo::IsCodeTarget(rmode)); |
| EMIT(0xE8); |
| if (RelocInfo::IsRuntimeEntry(rmode)) { |
| emit(reinterpret_cast<uint32_t>(entry), rmode); |
| } else { |
| emit(entry - (pc_ + sizeof(int32_t)), rmode); |
| } |
| } |
| |
| void Assembler::wasm_call(Address entry, RelocInfo::Mode rmode) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xE8); |
| emit(reinterpret_cast<intptr_t>(entry), rmode); |
| } |
| |
| int Assembler::CallSize(const Operand& adr) { |
| // Call size is 1 (opcode) + adr.len_ (operand). |
| return 1 + adr.len_; |
| } |
| |
| |
| void Assembler::call(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xFF); |
| emit_operand(edx, adr); |
| } |
| |
| |
| int Assembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode) { |
| return 1 /* EMIT */ + sizeof(uint32_t) /* emit */; |
| } |
| |
| void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) { |
| EnsureSpace ensure_space(this); |
| DCHECK(RelocInfo::IsCodeTarget(rmode)); |
| EMIT(0xE8); |
| emit(code, rmode); |
| } |
| |
| void Assembler::call(CodeStub* stub) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xE8); |
| emit(Immediate::EmbeddedCode(stub)); |
| } |
| |
| void Assembler::jmp(Label* L, Label::Distance distance) { |
| EnsureSpace ensure_space(this); |
| if (L->is_bound()) { |
| const int short_size = 2; |
| const int long_size = 5; |
| int offs = L->pos() - pc_offset(); |
| DCHECK_LE(offs, 0); |
| if (is_int8(offs - short_size)) { |
| // 1110 1011 #8-bit disp. |
| EMIT(0xEB); |
| EMIT((offs - short_size) & 0xFF); |
| } else { |
| // 1110 1001 #32-bit disp. |
| EMIT(0xE9); |
| emit(offs - long_size); |
| } |
| } else if (distance == Label::kNear) { |
| EMIT(0xEB); |
| emit_near_disp(L); |
| } else { |
| auto jump_opt = jump_optimization_info(); |
| if (V8_UNLIKELY(jump_opt)) { |
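| // When optimizing, emit a short jump with a zero placeholder displacement; |
| // it is patched in bind_to() once the label's position is known. When |
| // collecting, record where the 32-bit displacement will live so that |
| // GetCode() can check whether it would fit in 8 bits. |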
| if (jump_opt->is_optimizing() && is_optimizable_farjmp(farjmp_num_++)) { |
| EMIT(0xEB); |
| record_farjmp_position(L, pc_offset()); |
| EMIT(0); |
| return; |
| } |
| if (jump_opt->is_collecting()) { |
| farjmp_positions_.push_back(pc_offset() + 1); |
| } |
| } |
| // 1110 1001 #32-bit disp. |
| EMIT(0xE9); |
| emit_disp(L, Displacement::UNCONDITIONAL_JUMP); |
| } |
| } |
| |
| void Assembler::jmp(byte* entry, RelocInfo::Mode rmode) { |
| EnsureSpace ensure_space(this); |
| DCHECK(!RelocInfo::IsCodeTarget(rmode)); |
| EMIT(0xE9); |
| if (RelocInfo::IsRuntimeEntry(rmode)) { |
| emit(reinterpret_cast<uint32_t>(entry), rmode); |
| } else { |
| emit(entry - (pc_ + sizeof(int32_t)), rmode); |
| } |
| } |
| |
| |
| void Assembler::jmp(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xFF); |
| emit_operand(esp, adr); |
| } |
| |
| |
| void Assembler::jmp(Handle<Code> code, RelocInfo::Mode rmode) { |
| EnsureSpace ensure_space(this); |
| DCHECK(RelocInfo::IsCodeTarget(rmode)); |
| EMIT(0xE9); |
| emit(code, rmode); |
| } |
| |
| |
| void Assembler::j(Condition cc, Label* L, Label::Distance distance) { |
| EnsureSpace ensure_space(this); |
| DCHECK(0 <= cc && static_cast<int>(cc) < 16); |
| if (L->is_bound()) { |
| const int short_size = 2; |
| const int long_size = 6; |
| int offs = L->pos() - pc_offset(); |
| DCHECK_LE(offs, 0); |
| if (is_int8(offs - short_size)) { |
| // 0111 tttn #8-bit disp |
| EMIT(0x70 | cc); |
| EMIT((offs - short_size) & 0xFF); |
| } else { |
| // 0000 1111 1000 tttn #32-bit disp |
| EMIT(0x0F); |
| EMIT(0x80 | cc); |
| emit(offs - long_size); |
| } |
| } else if (distance == Label::kNear) { |
| EMIT(0x70 | cc); |
| emit_near_disp(L); |
| } else { |
| auto jump_opt = jump_optimization_info(); |
| if (V8_UNLIKELY(jump_opt)) { |
| if (jump_opt->is_optimizing() && is_optimizable_farjmp(farjmp_num_++)) { |
| // 0111 tttn #8-bit disp |
| EMIT(0x70 | cc); |
| record_farjmp_position(L, pc_offset()); |
| EMIT(0); |
| return; |
| } |
| if (jump_opt->is_collecting()) { |
| farjmp_positions_.push_back(pc_offset() + 2); |
| } |
| } |
| // 0000 1111 1000 tttn #32-bit disp |
| // Note: we could eliminate conditional jumps to this jump if the condition |
| // is the same; however, that seems to be a rather unlikely case. |
| EMIT(0x0F); |
| EMIT(0x80 | cc); |
| emit_disp(L, Displacement::OTHER); |
| } |
| } |
| |
| |
| void Assembler::j(Condition cc, byte* entry, RelocInfo::Mode rmode) { |
| EnsureSpace ensure_space(this); |
| DCHECK((0 <= cc) && (static_cast<int>(cc) < 16)); |
| // 0000 1111 1000 tttn #32-bit disp. |
| EMIT(0x0F); |
| EMIT(0x80 | cc); |
| if (RelocInfo::IsRuntimeEntry(rmode)) { |
| emit(reinterpret_cast<uint32_t>(entry), rmode); |
| } else { |
| emit(entry - (pc_ + sizeof(int32_t)), rmode); |
| } |
| } |
| |
| |
| void Assembler::j(Condition cc, Handle<Code> code, RelocInfo::Mode rmode) { |
| EnsureSpace ensure_space(this); |
| // 0000 1111 1000 tttn #32-bit disp |
| EMIT(0x0F); |
| EMIT(0x80 | cc); |
| emit(code, rmode); |
| } |
| |
| |
| // FPU instructions. |
| |
| void Assembler::fld(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xD9, 0xC0, i); |
| } |
| |
| |
| void Assembler::fstp(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDD, 0xD8, i); |
| } |
| |
| |
| void Assembler::fld1() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xE8); |
| } |
| |
| |
| void Assembler::fldpi() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xEB); |
| } |
| |
| |
| void Assembler::fldz() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xEE); |
| } |
| |
| |
| void Assembler::fldln2() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xED); |
| } |
| |
| |
| void Assembler::fld_s(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| emit_operand(eax, adr); |
| } |
| |
| |
| void Assembler::fld_d(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDD); |
| emit_operand(eax, adr); |
| } |
| |
| |
| void Assembler::fstp_s(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| emit_operand(ebx, adr); |
| } |
| |
| |
| void Assembler::fst_s(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| emit_operand(edx, adr); |
| } |
| |
| |
| void Assembler::fstp_d(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDD); |
| emit_operand(ebx, adr); |
| } |
| |
| |
| void Assembler::fst_d(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDD); |
| emit_operand(edx, adr); |
| } |
| |
| |
| void Assembler::fild_s(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDB); |
| emit_operand(eax, adr); |
| } |
| |
| |
| void Assembler::fild_d(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDF); |
| emit_operand(ebp, adr); |
| } |
| |
| |
| void Assembler::fistp_s(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDB); |
| emit_operand(ebx, adr); |
| } |
| |
| |
| void Assembler::fisttp_s(const Operand& adr) { |
| DCHECK(IsEnabled(SSE3)); |
| EnsureSpace ensure_space(this); |
| EMIT(0xDB); |
| emit_operand(ecx, adr); |
| } |
| |
| |
| void Assembler::fisttp_d(const Operand& adr) { |
| DCHECK(IsEnabled(SSE3)); |
| EnsureSpace ensure_space(this); |
| EMIT(0xDD); |
| emit_operand(ecx, adr); |
| } |
| |
| |
| void Assembler::fist_s(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDB); |
| emit_operand(edx, adr); |
| } |
| |
| |
| void Assembler::fistp_d(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDF); |
| emit_operand(edi, adr); |
| } |
| |
| |
| void Assembler::fabs() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xE1); |
| } |
| |
| |
| void Assembler::fchs() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xE0); |
| } |
| |
| |
| void Assembler::fcos() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xFF); |
| } |
| |
| |
| void Assembler::fsin() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xFE); |
| } |
| |
| |
| void Assembler::fptan() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xF2); |
| } |
| |
| |
| void Assembler::fyl2x() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xF1); |
| } |
| |
| |
| void Assembler::f2xm1() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xF0); |
| } |
| |
| |
| void Assembler::fscale() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xFD); |
| } |
| |
| |
| void Assembler::fninit() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDB); |
| EMIT(0xE3); |
| } |
| |
| |
| void Assembler::fadd(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDC, 0xC0, i); |
| } |
| |
| |
| void Assembler::fadd_i(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xD8, 0xC0, i); |
| } |
| |
| |
| void Assembler::fsub(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDC, 0xE8, i); |
| } |
| |
| |
| void Assembler::fsub_i(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xD8, 0xE0, i); |
| } |
| |
| |
| void Assembler::fisub_s(const Operand& adr) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDA); |
| emit_operand(esp, adr); |
| } |
| |
| |
| void Assembler::fmul_i(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xD8, 0xC8, i); |
| } |
| |
| |
| void Assembler::fmul(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDC, 0xC8, i); |
| } |
| |
| |
| void Assembler::fdiv(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDC, 0xF8, i); |
| } |
| |
| |
| void Assembler::fdiv_i(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xD8, 0xF0, i); |
| } |
| |
| |
| void Assembler::faddp(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDE, 0xC0, i); |
| } |
| |
| |
| void Assembler::fsubp(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDE, 0xE8, i); |
| } |
| |
| |
| void Assembler::fsubrp(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDE, 0xE0, i); |
| } |
| |
| |
| void Assembler::fmulp(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDE, 0xC8, i); |
| } |
| |
| |
| void Assembler::fdivp(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDE, 0xF8, i); |
| } |
| |
| |
| void Assembler::fprem() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xF8); |
| } |
| |
| |
| void Assembler::fprem1() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xF5); |
| } |
| |
| |
| void Assembler::fxch(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xD9, 0xC8, i); |
| } |
| |
| |
| void Assembler::fincstp() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xF7); |
| } |
| |
| |
| void Assembler::ffree(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDD, 0xC0, i); |
| } |
| |
| |
| void Assembler::ftst() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xE4); |
| } |
| |
| |
| void Assembler::fucomp(int i) { |
| EnsureSpace ensure_space(this); |
| emit_farith(0xDD, 0xE8, i); |
| } |
| |
| |
| void Assembler::fucompp() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDA); |
| EMIT(0xE9); |
| } |
| |
| |
| void Assembler::fucomi(int i) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDB); |
| EMIT(0xE8 + i); |
| } |
| |
| |
| void Assembler::fucomip() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDF); |
| EMIT(0xE9); |
| } |
| |
| |
| void Assembler::fcompp() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDE); |
| EMIT(0xD9); |
| } |
| |
| |
| void Assembler::fnstsw_ax() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDF); |
| EMIT(0xE0); |
| } |
| |
| |
| void Assembler::fwait() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x9B); |
| } |
| |
| |
| void Assembler::frndint() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xD9); |
| EMIT(0xFC); |
| } |
| |
| |
| void Assembler::fnclex() { |
| EnsureSpace ensure_space(this); |
| EMIT(0xDB); |
| EMIT(0xE2); |
| } |
| |
| |
| void Assembler::sahf() { |
| EnsureSpace ensure_space(this); |
| EMIT(0x9E); |
| } |
| |
| |
| void Assembler::setcc(Condition cc, Register reg) { |
| DCHECK(reg.is_byte_register()); |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x90 | cc); |
| EMIT(0xC0 | reg.code()); |
| } |
| |
| |
| void Assembler::cvttss2si(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x2C); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::cvttsd2si(Register dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x2C); |
| emit_operand(dst, src); |
| } |
| |
| |
| void Assembler::cvtsd2si(Register dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x2D); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::cvtsi2ss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x2A); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::cvtsi2sd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x2A); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::cvtss2sd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x5A); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::cvtsd2ss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x5A); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::cvtdq2ps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x5B); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::cvttps2dq(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x5B); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::addsd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x58); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::mulsd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x59); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::subsd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x5C); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::divsd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x5E); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::xorpd(XMMRegister dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x57); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::andps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x54); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::orps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x56); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::xorps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x57); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::addps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x58); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::subps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x5C); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::mulps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x59); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::divps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x5E); |
| emit_sse_operand(dst, src); |
| } |
| |
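| // rcpps and rsqrtps return hardware approximations rather than correctly
| // rounded results.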
| void Assembler::rcpps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x53); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::rsqrtps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x52); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::minps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x5D); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::maxps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x5F); |
| emit_sse_operand(dst, src); |
| } |
| |
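| // The trailing immediate selects the comparison predicate
| // (0 = EQ, 1 = LT, 2 = LE, 3 = UNORD, 4 = NEQ, 5 = NLT, 6 = NLE, 7 = ORD).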
| void Assembler::cmpps(XMMRegister dst, const Operand& src, int8_t cmp) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xC2); |
| emit_sse_operand(dst, src); |
| EMIT(cmp); |
| } |
| |
| void Assembler::sqrtsd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x51); |
| emit_sse_operand(dst, src); |
| } |
| |
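| // haddps (F2 0F 7C, SSE3) adds adjacent pairs of single-precision elements.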
| void Assembler::haddps(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x7C); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::andpd(XMMRegister dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x54); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::orpd(XMMRegister dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x56); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::ucomisd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x2E); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::roundss(XMMRegister dst, XMMRegister src, RoundingMode mode) { |
| DCHECK(IsEnabled(SSE4_1)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x3A); |
| EMIT(0x0A); |
| emit_sse_operand(dst, src); |
| // Mask the precision exception (bit 3 of the immediate).
| EMIT(static_cast<byte>(mode) | 0x8); |
| } |
| |
| |
| void Assembler::roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode) { |
| DCHECK(IsEnabled(SSE4_1)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x3A); |
| EMIT(0x0B); |
| emit_sse_operand(dst, src); |
| // Mask the precision exception (bit 3 of the immediate).
| EMIT(static_cast<byte>(mode) | 0x8); |
| } |
| |
| |
| void Assembler::movmskpd(Register dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x50); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::movmskps(Register dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x50); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::maxsd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x5F); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::minsd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x5D); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::cmpltsd(XMMRegister dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0xC2); |
| emit_sse_operand(dst, src); |
| EMIT(1); // LT == 1 |
| } |
| |
| |
| void Assembler::movaps(XMMRegister dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x28); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::movups(XMMRegister dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x10); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::movups(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x10); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::movups(const Operand& dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x11); |
| emit_sse_operand(src, dst); |
| } |
| |
| void Assembler::shufps(XMMRegister dst, XMMRegister src, byte imm8) { |
| DCHECK(is_uint8(imm8)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0xC6); |
| emit_sse_operand(dst, src); |
| EMIT(imm8); |
| } |
| |
| |
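| // movdqa (66 0F 6F/7F) requires 16-byte aligned memory operands; the
| // movdqu forms below (F3 0F 6F/7F) allow unaligned access.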
| void Assembler::movdqa(const Operand& dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x7F); |
| emit_sse_operand(src, dst); |
| } |
| |
| |
| void Assembler::movdqa(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x6F); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::movdqu(const Operand& dst, XMMRegister src) {
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x7F); |
| emit_sse_operand(src, dst); |
| } |
| |
| |
| void Assembler::movdqu(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x6F); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::prefetch(const Operand& src, int level) { |
| DCHECK(is_uint2(level)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x18); |
| // Emit the hint number in the reg field of the ModR/M byte
| // (/0 = prefetchnta, /1 = prefetcht0, /2 = prefetcht1, /3 = prefetcht2).
| XMMRegister code = XMMRegister::from_code(level); |
| emit_sse_operand(code, src); |
| } |
| |
| |
| void Assembler::movsd(const Operand& dst, XMMRegister src) {
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); // double |
| EMIT(0x0F); |
| EMIT(0x11); // store |
| emit_sse_operand(src, dst); |
| } |
| |
| |
| void Assembler::movsd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); // double |
| EMIT(0x0F); |
| EMIT(0x10); // load |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::movss(const Operand& dst, XMMRegister src) {
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); // float |
| EMIT(0x0F); |
| EMIT(0x11); // store |
| emit_sse_operand(src, dst); |
| } |
| |
| |
| void Assembler::movss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); // float |
| EMIT(0x0F); |
| EMIT(0x10); // load |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::movd(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x6E); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::movd(const Operand& dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x7E); |
| emit_sse_operand(src, dst); |
| } |
| |
| |
| void Assembler::extractps(Register dst, XMMRegister src, byte imm8) { |
| DCHECK(IsEnabled(SSE4_1)); |
| DCHECK(is_uint8(imm8)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x3A); |
| EMIT(0x17); |
| emit_sse_operand(src, dst); |
| EMIT(imm8); |
| } |
| |
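| // ptest (66 0F 38 17) only sets ZF and CF from bitwise tests of the
| // operands; it does not write an XMM destination.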
| void Assembler::ptest(XMMRegister dst, XMMRegister src) { |
| DCHECK(IsEnabled(SSE4_1)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x38); |
| EMIT(0x17); |
| emit_sse_operand(dst, src); |
| } |
| |
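| // The shift-by-immediate forms encode the operation as an opcode extension
| // in the reg field of the ModR/M byte: /6 = shift left, /2 = logical shift
| // right, /4 = arithmetic shift right. The general-purpose register passed
| // to emit_sse_operand merely supplies that register code.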
| void Assembler::psllw(XMMRegister reg, int8_t shift) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x71); |
| emit_sse_operand(esi, reg); // esi == 6 |
| EMIT(shift); |
| } |
| |
| void Assembler::pslld(XMMRegister reg, int8_t shift) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x72); |
| emit_sse_operand(esi, reg); // esi == 6 |
| EMIT(shift); |
| } |
| |
| void Assembler::psrlw(XMMRegister reg, int8_t shift) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x71); |
| emit_sse_operand(edx, reg); // edx == 2 |
| EMIT(shift); |
| } |
| |
| void Assembler::psrld(XMMRegister reg, int8_t shift) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x72); |
| emit_sse_operand(edx, reg); // edx == 2 |
| EMIT(shift); |
| } |
| |
| void Assembler::psraw(XMMRegister reg, int8_t shift) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x71); |
| emit_sse_operand(esp, reg); // esp == 4 |
| EMIT(shift); |
| } |
| |
| void Assembler::psrad(XMMRegister reg, int8_t shift) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x72); |
| emit_sse_operand(esp, reg); // esp == 4 |
| EMIT(shift); |
| } |
| |
| void Assembler::psllq(XMMRegister reg, int8_t shift) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x73); |
| emit_sse_operand(esi, reg); // esi == 6 |
| EMIT(shift); |
| } |
| |
| |
| void Assembler::psllq(XMMRegister dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0xF3); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::psrlq(XMMRegister reg, int8_t shift) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x73); |
| emit_sse_operand(edx, reg); // edx == 2 |
| EMIT(shift); |
| } |
| |
| |
| void Assembler::psrlq(XMMRegister dst, XMMRegister src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0xD3); |
| emit_sse_operand(dst, src); |
| } |
| |
| void Assembler::pshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF2); |
| EMIT(0x0F); |
| EMIT(0x70); |
| emit_sse_operand(dst, src); |
| EMIT(shuffle); |
| } |
| |
| void Assembler::pshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x70); |
| emit_sse_operand(dst, src); |
| EMIT(shuffle); |
| } |
| |
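| // SSE4.1 extract/insert instructions (66 0F 3A xx); the trailing immediate
| // selects the lane.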
| void Assembler::pextrb(const Operand& dst, XMMRegister src, int8_t offset) { |
| DCHECK(IsEnabled(SSE4_1)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x3A); |
| EMIT(0x14); |
| emit_sse_operand(src, dst); |
| EMIT(offset); |
| } |
| |
| void Assembler::pextrw(const Operand& dst, XMMRegister src, int8_t offset) { |
| DCHECK(IsEnabled(SSE4_1)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x3A); |
| EMIT(0x15); |
| emit_sse_operand(src, dst); |
| EMIT(offset); |
| } |
| |
| void Assembler::pextrd(const Operand& dst, XMMRegister src, int8_t offset) { |
| DCHECK(IsEnabled(SSE4_1)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x3A); |
| EMIT(0x16); |
| emit_sse_operand(src, dst); |
| EMIT(offset); |
| } |
| |
| void Assembler::insertps(XMMRegister dst, const Operand& src, int8_t offset) { |
| DCHECK(IsEnabled(SSE4_1)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x3A); |
| EMIT(0x21); |
| emit_sse_operand(dst, src); |
| EMIT(offset); |
| } |
| |
| void Assembler::pinsrb(XMMRegister dst, const Operand& src, int8_t offset) { |
| DCHECK(IsEnabled(SSE4_1)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x3A); |
| EMIT(0x20); |
| emit_sse_operand(dst, src); |
| EMIT(offset); |
| } |
| |
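| // pinsrw (66 0F C4) is available from SSE2 on, so unlike pinsrb/pinsrd it
| // only checks the immediate, not the SSE4_1 feature.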
| void Assembler::pinsrw(XMMRegister dst, const Operand& src, int8_t offset) { |
| DCHECK(is_uint8(offset)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0xC4); |
| emit_sse_operand(dst, src); |
| EMIT(offset); |
| } |
| |
| void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) { |
| DCHECK(IsEnabled(SSE4_1)); |
| EnsureSpace ensure_space(this); |
| EMIT(0x66); |
| EMIT(0x0F); |
| EMIT(0x3A); |
| EMIT(0x22); |
| emit_sse_operand(dst, src); |
| EMIT(offset); |
| } |
| |
| |
| void Assembler::addss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x58); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::subss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x5C); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::mulss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x59); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::divss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x5E); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::sqrtss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x51); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::ucomiss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0x0F); |
| EMIT(0x2E); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::maxss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x5F); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| void Assembler::minss(XMMRegister dst, const Operand& src) { |
| EnsureSpace ensure_space(this); |
| EMIT(0xF3); |
| EMIT(0x0F); |
| EMIT(0x5D); |
| emit_sse_operand(dst, src); |
| } |
| |
| |
| // AVX instructions |
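| // vfmasd/vfmass emit a VEX prefix selecting the 66 0F 38 opcode map;
| // VEX.W distinguishes the double-precision (W1) from the single-precision
| // (W0) FMA3 forms.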
| void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1, |
| const Operand& src2) { |
| DCHECK(IsEnabled(FMA3)); |
| EnsureSpace ensure_space(this); |
| emit_vex_prefix(src1, kLIG, k66, k0F38, kW1); |
| EMIT(op); |
| emit_sse_operand(dst, src2); |
| } |
| |
| |
| void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1, |
| const Operand& src2) { |
| DCHECK(IsEnabled(FMA3)); |
| EnsureSpace ensure_space(this); |
| emit_vex_prefix(src1, kLIG, k66, k0F38, kW0); |
| EMIT(op); |
| emit_sse_operand(dst, src2); |
| } |
| |
| |
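| // vsd/vss/vps/vpd forward to vinstr with the VEX SIMD prefix matching the
| // legacy encodings of the scalar-double, scalar-single, packed-single and
| // packed-double groups (F2, F3, none, 66).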
| void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1, |
| const Operand& src2) { |
| vinstr(op, dst, src1, src2, kF2, k0F, kWIG); |
| } |
| |
| |
| void Assembler::vss(byte op, XMMRegister dst, XMMRegister src1, |
| const Operand& src2) { |
| vinstr(op, dst, src1, src2, kF3, k0F, kWIG); |
| } |
| |
| |
| void Assembler::vps(byte op, XMMRegister dst, XMMRegister src1, |
| const Operand& src2) { |
| vinstr(op, dst, src1, src2, kNone, k0F, kWIG); |
| } |
| |
| |
| void Assembler::vpd(byte op, XMMRegister dst, XMMRegister src1, |
| const Operand& src2) { |
| vinstr(op, dst, src1, src2, k66, k0F, kWIG); |
| } |
| |
| void Assembler::vcmpps(XMMRegister dst, XMMRegister src1, const Operand& src2, |
| int8_t cmp) { |
| vps(0xC2, dst, src1, src2); |
| EMIT(cmp); |
| } |
| |
| void Assembler::vshufps(XMMRegister dst, XMMRegister src1, const Operand& src2, |
| byte imm8) { |
| DCHECK(is_uint8(imm8)); |
| vps(0xC6, dst, src1, src2); |
| EMIT(imm8); |
| } |
| |
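| // As with the non-VEX shifts above, the opcode extension (/6, /2, /4) is
| // passed as an XMM register code in the reg position.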
| void Assembler::vpsllw(XMMRegister dst, XMMRegister src, int8_t imm8) { |
| XMMRegister iop = XMMRegister::from_code(6); |
| vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG); |
| EMIT(imm8); |
| } |
| |
| void Assembler::vpslld(XMMRegister dst, XMMRegister src, int8_t imm8) { |
| XMMRegister iop = XMMRegister::from_code(6); |
| vinstr(0x72, iop, dst, Operand(src), k66, k0F, kWIG); |
| EMIT(imm8); |
| } |
| |
| void Assembler::vpsrlw(XMMRegister dst, XMMRegister src, int8_t imm8) { |
| XMMRegister iop = XMMRegister::from_code(2); |
| vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG); |
| EMIT(imm8); |
| } |
| |
| void Assembler::vpsrld(XMMRegister dst, XMMRegister src, int8_t imm8) { |
| XMMRegister iop = XMMRegister::from_code(2); |
| vinstr(0x72, iop, dst, Operand(src), k66, k0F, kWIG); |
| EMIT(imm8); |
| } |
| |
| void Assembler::vpsraw(XMMRegister dst, XMMRegister src, int8_t imm8) { |
| XMMRegister iop = XMMRegister::from_code(4); |
| vinstr(0x71, iop, dst, Operand(src), k66, k0F, kWIG); |
| EMIT(imm8); |
| } |
| |
| void Assembler::vpsrad(XMMRegister dst, XMMRegister src, int8_t imm8) { |
| XMMRegister iop = XMMRegister::from_code(4); |
| vinstr(0x72, iop, dst, Operand(src), k66, k0F, kWIG); |
| EMIT(imm8); |
| } |
| |
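| // These instructions have no second source operand; xmm0 is passed as a
| // placeholder for the unused VEX.vvvv field (encoded inverted, so register
| // code 0 yields the required 1111b).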
| void Assembler::vpshuflw(XMMRegister dst, const Operand& src, uint8_t shuffle) { |
| vinstr(0x70, dst, xmm0, src, kF2, k0F, kWIG); |
| EMIT(shuffle); |
| } |
| |
| void Assembler::vpshufd(XMMRegister dst, const Operand& src, uint8_t shuffle) { |
| vinstr(0x70, dst, xmm0, src, k66, k0F, kWIG); |
| EMIT(shuffle); |
| } |
| |
| void Assembler::vpextrb(const Operand& dst, XMMRegister src, int8_t offset) { |
| vinstr(0x14, src, xmm0, dst, k66, k0F3A, kWIG); |
| EMIT(offset); |
| } |
| |
| void Assembler::vpextrw(const Operand& dst, XMMRegister src, int8_t offset) { |
| vinstr(0x15, src, xmm0, dst, k66, k0F3A, kWIG); |
| EMIT(offset); |
|