/* * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Construct an AddressLiteral for `target`, recording the relocation
// implied by `rtype`. The literal denotes an rvalue (not an lvalue).
AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
  _is_lval = false;
  _target  = target;
  switch (rtype) {
  case relocInfo::oop_type:
  case relocInfo::metadata_type:
    // Oops are a special case. Normally they would be their own section
    // but in cases like icBuffer they are literals in the code stream that
    // we don't have a section for. We use none so that we get a literal
    // address which is always patchable.
    break;
  case relocInfo::external_word_type:
    _rspec = external_word_Relocation::spec(target);
    break;
  case relocInfo::internal_word_type:
    _rspec = internal_word_Relocation::spec(target);
    break;
  case relocInfo::opt_virtual_call_type:
    _rspec = opt_virtual_call_Relocation::spec();
    break;
  case relocInfo::static_call_type:
    _rspec = static_call_Relocation::spec();
    break;
  case relocInfo::runtime_call_type:
    _rspec = runtime_call_Relocation::spec();
    break;
  case relocInfo::poll_type:
  case relocInfo::poll_return_type:
    _rspec = Relocation::spec_simple(rtype);
    break;
  case relocInfo::none:
    // No relocation record needed.
    break;
  default:
    ShouldNotReachHere();
    break;
  }
}
// Implementation of Address
#ifdef _LP64
// ArrayAddress is not implementable on 64-bit machines; any such address
// should have been lowered higher up the call chain, so reaching this
// function is a programming error.
Address Address::make_array(ArrayAddress adr) {
  ShouldNotReachHere();
  return Address();
}
// Convert the raw encoding form into the form expected by the constructor for // Address. An index of 4 (rsp) corresponds to having no index, so convert // that to noreg for the Address constructor.
// Convert the raw encoding form into the form expected by the constructor
// for Address. An index of 4 (rsp) corresponds to having no index, so
// convert that to noreg for the Address constructor.
Address Address::make_raw(int base, int index, int scale, int disp, relocInfo::relocType disp_reloc) {
  RelocationHolder reloc_spec = RelocationHolder::none;
  if (disp_reloc != relocInfo::none) {
    reloc_spec = Relocation::spec_simple(disp_reloc);
  }
  // rsp's encoding in the index slot means "no index register".
  const bool has_index = (index != rsp->encoding());
  if (!has_index) {
    Address result(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
    result._rspec = reloc_spec;
    return result;
  } else {
    Address result(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
    result._rspec = reloc_spec;
    return result;
  }
}
// Implementation of Assembler
// Byte used to pad gaps in generated code. 0xF4 is the x86 `hlt`
// instruction, so control straying into fill bytes traps immediately.
int AbstractAssembler::code_fill_byte() {
  return (u_char)'\xF4'; // hlt
}
void Assembler::membar(Membar_mask_bits order_constraint) {
  // Only StoreLoad needs code on x86. All usable chips support "locked"
  // instructions, which suffice as barriers and are much faster than the
  // alternative of using a cpuid instruction, so we emit a locked
  // add [esp-C], 0. That is conveniently otherwise a no-op except for
  // blowing flags, and it introduces a false dependency on the target
  // memory location; we cannot do anything about the flags, but we avoid
  // memory dependencies within the current method by locked-adding
  // somewhere *below* SP rather than above it ([esp+C] would collide with
  // data on the current stack). A slot just under SP is almost always in
  // the data cache for any small C. We step back a full cache line so we
  // don't create data dependencies with things below SP (callee-saves,
  // etc.) and to avoid second-order effects of locked ops against the
  // cache line — but the offset is clamped to keep the 8-bit displacement
  // encoding, which covers [-128, +127].
  //
  // Any change to this code may require revisiting other places where this
  // idiom is used, in particular the orderAccess code.
  if (order_constraint & StoreLoad) {
    int spill_offset = -VM_Version::L1_line_size();
    if (spill_offset < -128) {
      spill_offset = -128;
    }
    lock();
    addl(Address(rsp, spill_offset), 0); // Assert the lock# signal here
  }
}
// make this go away someday void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) { if (rtype == relocInfo::none)
emit_int32(data); else
emit_data(data, Relocation::spec_simple(rtype), format);
}
// Emit a 32-bit datum with an explicit relocation spec. The relocation is
// recorded against the enclosing instruction (inst_mark), not against the
// embedded word itself.
void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
  assert(imm_operand == 0, "default format must be immediate in this file");
  assert(inst_mark() != NULL, "must be inside InstructionMark");
  if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
    check_relocation(rspec, format);
#endif
    // Do not use AbstractAssembler::relocate, which is not intended for
    // embedded words. Instead, relocate to the enclosing instruction.

    // hack. call32 is too wide for mask so use disp32
    const int effective_format = (format == call32_operand) ? disp32_operand : format;
    code_section()->relocate(inst_mark(), rspec, effective_format);
  }
  emit_int32(data);
}
staticint encode(Register r) { int enc = r->encoding(); if (enc >= 8) {
enc -= 8;
} return enc;
}
// Emit an 8-bit arithmetic instruction with register destination and an
// 8-bit immediate: opcode byte, ModRM byte with `dst` folded into the low
// bits, then the immediate.
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
  assert(dst->has_byte_register(), "must have byte register");
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  assert(isByte(imm8), "not a byte");
  // Bit 0 of the opcode selects operand size; it must be clear for the
  // 8-bit form.
  assert((op1 & 0x01) == 0, "should be 8bit operation");
  emit_int24(op1, (op2 | encode(dst)), imm8);
}
void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
assert(isByte(op1) && isByte(op2), "wrong opcode");
assert(op1 == 0x81, "Unexpected opcode"); if (is8bit(imm32)) {
emit_int24(op1 | 0x02, // set sign bit
op2 | encode(dst),
imm32 & 0xFF);
} elseif (dst == rax) { switch (op2) { case 0xD0: emit_int8(0x15); break; // adc case 0xC0: emit_int8(0x05); break; // add case 0xE0: emit_int8(0x25); break; // and case 0xF8: emit_int8(0x3D); break; // cmp case 0xC8: emit_int8(0x0D); break; // or case 0xD8: emit_int8(0x1D); break; // sbb case 0xE8: emit_int8(0x2D); break; // sub case 0xF0: emit_int8(0x35); break; // xor default: ShouldNotReachHere();
}
emit_int32(imm32);
} else {
emit_int16(op1, (op2 | encode(dst)));
emit_int32(imm32);
}
}
// Force generation of a 4 byte immediate value even if it fits into 8bit
void Assembler::emit_arith_imm32(int op1, int op2, Register dst, int32_t imm32) {
  assert(isByte(op1) && isByte(op2), "wrong opcode");
  // Bit 0 selects 32-bit operand size; bit 1 is the sign-extended-imm8
  // flag, which must be clear since we are deliberately emitting imm32.
  assert((op1 & 0x01) == 1, "should be 32bit operation");
  assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
  emit_int16(op1, (op2 | encode(dst)));
  emit_int32(imm32);
}
// immediate-to-memory forms void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
assert((op1 & 0x01) == 1, "should be 32bit operation");
assert((op1 & 0x02) == 0, "sign-extension bit should not be set"); if (is8bit(imm32)) {
emit_int8(op1 | 0x02); // set sign bit
emit_operand(rm, adr, 1);
emit_int8(imm32 & 0xFF);
} else {
emit_int8(op1);
emit_operand(rm, adr, 4);
emit_int32(imm32);
}
}
bool Assembler::query_compressed_disp_byte(int disp, bool is_evex_inst, int vector_len, int cur_tuple_type, int in_size_in_bits, int cur_encoding) { int mod_idx = 0; // We will test if the displacement fits the compressed format and if so // apply the compression to the displacement iff the result is8bit. if (VM_Version::supports_evex() && is_evex_inst) { switch (cur_tuple_type) { case EVEX_FV: if ((cur_encoding & VEX_W) == VEX_W) {
mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
} else {
mod_idx = ((cur_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
} break;
bool Assembler::emit_compressed_disp_byte(int &disp) { int mod_idx = 0; // We will test if the displacement fits the compressed format and if so // apply the compression to the displacement iff the result is8bit. if (VM_Version::supports_evex() && _attributes && _attributes->is_evex_instruction()) { int evex_encoding = _attributes->get_evex_encoding(); int tuple_type = _attributes->get_tuple_type(); switch (tuple_type) { case EVEX_FV: if ((evex_encoding & VEX_W) == VEX_W) {
mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 3 : 2;
} else {
mod_idx = ((evex_encoding & EVEX_Rb) == EVEX_Rb) ? 1 : 0;
} break;
inlinevoid Assembler::emit_modrm(int mod, int dst_enc, int src_enc) {
assert((mod & 3) != 0b11, "forbidden"); int modrm = modrm_encoding(mod, dst_enc, src_enc);
emit_int8(modrm);
}
inlinevoid Assembler::emit_modrm_disp8(int mod, int dst_enc, int src_enc, int disp) { int modrm = modrm_encoding(mod, dst_enc, src_enc);
emit_int16(modrm, disp & 0xFF);
}
inlinevoid Assembler::emit_modrm_sib(int mod, int dst_enc, int src_enc,
Address::ScaleFactor scale, int index_enc, int base_enc) { int modrm = modrm_encoding(mod, dst_enc, src_enc); int sib = sib_encoding(scale, index_enc, base_enc);
emit_int16(modrm, sib);
}
inlinevoid Assembler::emit_modrm_sib_disp8(int mod, int dst_enc, int src_enc,
Address::ScaleFactor scale, int index_enc, int base_enc, int disp) { int modrm = modrm_encoding(mod, dst_enc, src_enc); int sib = sib_encoding(scale, index_enc, base_enc);
emit_int24(modrm, sib, disp & 0xFF);
}
// Emits the ModRM/SIB/displacement bytes of a memory operand.
// NOTE(review): this block appears to have lost text in extraction — the
// branches for a valid base/index encoding, and the `if` that matches the
// dangling `} else {` below, are missing. Only the no-base RIP-relative
// path and the absolute-disp32 path survive. Restore the full function
// from the upstream sources before relying on this code.
void Assembler::emit_operand_helper(int reg_enc, int base_enc, int index_enc,
Address::ScaleFactor scale, int disp,
RelocationHolder const& rspec, int post_addr_length) { bool no_relocation = (rspec.type() == relocInfo::none);
// RIP-relative form: mod=00, r/m=101 means [RIP+disp32] in 64-bit mode.
emit_modrm(0b00, reg_enc, 0b101 /* no base */); // Note that the RIP-rel. correction applies to the generated
// disp field, but _not_ to the target address in the rspec.
// disp was created by converting the target address minus the pc
// at the start of the instruction. That needs more correction here.
// intptr_t disp = target - next_ip;
assert(inst_mark() != NULL, "must be inside InstructionMark");
// next_ip = address of the byte following the disp32 and any trailing
// immediate, i.e. the next instruction.
address next_ip = pc() + sizeof(int32_t) + post_addr_length;
int64_t adjusted = disp; // Do rip-rel adjustment for 64bit
LP64_ONLY(adjusted -= (next_ip - inst_mark()));
assert(is_simm32(adjusted), "must be 32bit offset (RIP relative address)");
emit_data((int32_t) adjusted, rspec, disp32_operand);
} else { // base == noreg, index == noreg, no_relocation == true // 32bit never did this, did everything as the rip-rel/disp code above // [disp] ABSOLUTE // [00 reg 100][00 100 101] disp32
emit_modrm_sib(0b00, reg_enc, 0b100 /* no base */,
Address::times_1, 0b100, 0b101);
emit_data(disp, rspec, disp32_operand);
}
}
}
// Secret local extension to Assembler::WhichOperand: #define end_pc_operand (_WhichOperand_limit)
// Decode the given instruction, and return the address of
// an embedded 32-bit operand word.

// If "which" is disp32_operand, selects the displacement portion
// of an effective address specifier.
// If "which" is imm64_operand, selects the trailing immediate constant.
// If "which" is call32_operand, selects the displacement of a call or jump.
// Caller is responsible for ensuring that there is such an operand,
// and that it is 32/64 bits wide.

// If "which" is end_pc_operand, find the end of the instruction.

// Fixes vs the extracted text: the `switch (op2 >> 6)` header and its
// `case 0:` label (operand-parsing section near the end) had been lost,
// leaving the `case 1/2/3` labels with no enclosing switch; `const int`
// had been fused into `constint`.
address Assembler::locate_operand(address inst, WhichOperand which) {
  address ip = inst;
  bool is_64bit = false;
  debug_only(bool has_disp32 = false);
  int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn

 again_after_prefix:
  switch (0xFF & *ip++) {

  // These convenience macros generate groups of "case" labels for the switch.
#define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3

#define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
             case (x)+4: case (x)+5: case (x)+6: case (x)+7

#define REP16(x) REP8((x)+0): \
              case REP8((x)+8)

  case CS_segment:
  case SS_segment:
  case DS_segment:
  case ES_segment:
  case FS_segment:
  case GS_segment:
    // Seems dubious
    LP64_ONLY(assert(false, "shouldn't have that prefix"));
    assert(ip == inst+1, "only one prefix allowed");
    goto again_after_prefix;

  case 0x67:
  case REX:
  case REX_B:
  case REX_X:
  case REX_XB:
  case REX_R:
  case REX_RB:
  case REX_RX:
  case REX_RXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    goto again_after_prefix;

  case REX_W:
  case REX_WB:
  case REX_WX:
  case REX_WXB:
  case REX_WR:
  case REX_WRB:
  case REX_WRX:
  case REX_WRXB:
    NOT_LP64(assert(false, "64bit prefixes"));
    is_64bit = true;
    goto again_after_prefix;

  case 0xFF: // pushq a; decl a; incl a; call a; jmp a
  case 0x88: // movb a, r
  case 0x89: // movl a, r
  case 0x8A: // movb r, a
  case 0x8B: // movl r, a
  case 0x8F: // popl a
    debug_only(has_disp32 = true);
    break;

  case 0x68: // pushq #32
    if (which == end_pc_operand) {
      return ip + 4;
    }
    assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
    return ip;                  // not produced by emit_operand

  case 0x66: // movw ... (size prefix)
   again_after_size_prefix2:
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "64bit prefix found"));
      goto again_after_size_prefix2;
    case 0x8B: // movw r, a
    case 0x89: // movw a, r
      debug_only(has_disp32 = true);
      break;
    case 0xC7: // movw a, #16
      debug_only(has_disp32 = true);
      tail_size = 2;  // the imm16
      break;
    case 0x0F: // several SSE/SSE2 variants
      ip--;    // reparse the 0x0F
      goto again_after_prefix;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP8(0xB8): // movl/q r, #32/#64(oop?)
    if (which == end_pc_operand)  return ip + (is_64bit ? 8 : 4);
    // these asserts are somewhat nonsensical
#ifndef _LP64
    assert(which == imm_operand || which == disp32_operand,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#else
    assert((which == call32_operand || which == imm_operand) && is_64bit ||
           which == narrow_oop_operand && !is_64bit,
           "which %d is_64_bit %d ip " INTPTR_FORMAT, which, is_64bit, p2i(ip));
#endif // _LP64
    return ip;

  case 0x69: // imul r, a, #32
  case 0xC7: // movl a, #32(oop?)
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x0F: // movx..., etc.
    switch (0xFF & *ip++) {
    case 0x3A: // pcmpestri
      tail_size = 1;
    case 0x38: // ptest, pmovzxbw
      ip++; // skip opcode
      debug_only(has_disp32 = true); // has both kinds of operands!
      break;

    case 0x70: // pshufd r, r/a, #8
      debug_only(has_disp32 = true); // has both kinds of operands!
    case 0x73: // psrldq r, #8
      tail_size = 1;
      break;

    case 0x10: // movups
    case 0x11: // movups
    case 0x12: // movlps
    case 0x28: // movaps
    case 0x2E: // ucomiss
    case 0x2F: // comiss
    case 0x54: // andps
    case 0x55: // andnps
    case 0x56: // orps
    case 0x57: // xorps
    case 0x58: // addpd
    case 0x59: // mulpd
    case 0x6E: // movd
    case 0x7E: // movd
    case 0x6F: // movdq
    case 0x7F: // movdq
    case 0xAE: // ldmxcsr, stmxcsr, fxrstor, fxsave, clflush
    case 0xFE: // paddd
      debug_only(has_disp32 = true);
      break;

    case 0xAD: // shrd r, a, %cl
    case 0xAF: // imul r, a
    case 0xBE: // movsbl r, a (movsxb)
    case 0xBF: // movswl r, a (movsxw)
    case 0xB6: // movzbl r, a (movzxb)
    case 0xB7: // movzwl r, a (movzxw)
    case REP16(0x40): // cmovl cc, r, a
    case 0xB0: // cmpxchgb
    case 0xB1: // cmpxchg
    case 0xC1: // xaddl
    case 0xC7: // cmpxchg8
    case REP16(0x90): // setcc a
      debug_only(has_disp32 = true);
      // fall out of the switch to decode the address
      break;

    case 0xC4: // pinsrw r, a, #8
      debug_only(has_disp32 = true);
    case 0xC5: // pextrw r, r, #8
      tail_size = 1;  // the imm8
      break;

    case 0xAC: // shrd r, a, #8
      debug_only(has_disp32 = true);
      tail_size = 1;  // the imm8
      break;

    case REP16(0x80): // jcc rdisp32
      if (which == end_pc_operand)  return ip + 4;
      assert(which == call32_operand, "jcc has no disp32 or imm");
      return ip;
    default:
      ShouldNotReachHere();
    }
    break;

  case 0x81: // addl a, #32; addl r, #32
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    // on 32bit in the case of cmpl, the imm might be an oop
    tail_size = 4;
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x83: // addl a, #8; addl r, #8
    // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1;
    break;

  case 0x15: // adc rax, #32
  case 0x05: // add rax, #32
  case 0x25: // and rax, #32
  case 0x3D: // cmp rax, #32
  case 0x0D: // or  rax, #32
  case 0x1D: // sbb rax, #32
  case 0x2D: // sub rax, #32
  case 0x35: // xor rax, #32
    return which == end_pc_operand ? ip + 4 : ip;

  case 0x9B:
    switch (0xFF & *ip++) {
    case 0xD9: // fnstcw a
      debug_only(has_disp32 = true);
      break;
    default:
      ShouldNotReachHere();
    }
    break;

  case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
  case REP4(0x10): // adc...
  case REP4(0x20): // and...
  case REP4(0x30): // xor...
  case REP4(0x08): // or...
  case REP4(0x18): // sbb...
  case REP4(0x28): // sub...
  case 0xF7: // mull a
  case 0x8D: // lea r, a
  case 0x87: // xchg r, a
  case REP4(0x38): // cmp...
  case 0x85: // test r, a
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xA8: // testb rax, #8
    return which == end_pc_operand ? ip + 1 : ip;
  case 0xA9: // testl/testq rax, #32
    return which == end_pc_operand ? ip + 4 : ip;

  case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
  case 0xC6: // movb a, #8
  case 0x80: // cmpb a, #8
  case 0x6B: // imul r, a, #8
    debug_only(has_disp32 = true); // has both kinds of operands!
    tail_size = 1; // the imm8
    break;

  case 0xC4: // VEX_3bytes
  case 0xC5: // VEX_2bytes
    assert((UseAVX > 0), "shouldn't have VEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // C4 and C5 are also used as opcodes for PINSRW and PEXTRW instructions
    // but they have prefix 0x0F and processed when 0x0F processed above.
    //
    // In 32-bit mode the VEX first byte C4 and C5 alias onto LDS and LES
    // instructions (these instructions are not supported in 64-bit mode).
    // To distinguish them bits [7:6] are set in the VEX second byte since
    // ModRM byte can not be of the form 11xxxxxx in 32-bit mode. To set
    // those VEX bits REX and vvvv bits are inverted.
    //
    // Fortunately C2 doesn't generate these instructions so we don't need
    // to check for them in product version.

    // Check second byte
    NOT_LP64(assert((0xC0 & *ip) == 0xC0, "shouldn't have LDS and LES instructions"));

    int vex_opcode;
    // First byte
    if ((0xFF & *inst) == VEX_3bytes) {
      vex_opcode = VEX_OPCODE_MASK & *ip;
      ip++; // third byte
      is_64bit = ((VEX_W & *ip) == VEX_W);
    } else {
      vex_opcode = VEX_OPCODE_0F;
    }
    ip++; // opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (vex_opcode) {
      case VEX_OPCODE_0F:
        switch (0xFF & *ip) {
        case 0x70: // pshufd r, r/a, #8
        case 0x71: // ps[rl|ra|ll]w r, #8
        case 0x72: // ps[rl|ra|ll]d r, #8
        case 0x73: // ps[rl|ra|ll]q r, #8
        case 0xC2: // cmp[ps|pd|ss|sd] r, r, r/a, #8
        case 0xC4: // pinsrw r, r, r/a, #8
        case 0xC5: // pextrw r/a, r, #8
        case 0xC6: // shufp[s|d] r, r, r/a, #8
          tail_size = 1;  // the imm8
          break;
        }
        break;
      case VEX_OPCODE_0F_3A:
        tail_size = 1;
        break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0x62: // EVEX_4bytes
    assert(VM_Version::supports_evex(), "shouldn't have EVEX prefix");
    assert(ip == inst+1, "no prefixes allowed");
    // no EVEX collisions, all instructions that have 0x62 opcodes
    // have EVEX versions and are subopcodes of 0x66
    ip++; // skip P0 and examine W in P1
    is_64bit = ((VEX_W & *ip) == VEX_W);
    ip++; // move to P2
    ip++; // skip P2, move to opcode
    // To find the end of instruction (which == end_pc_operand).
    switch (0xFF & *ip) {
    case 0x22: // pinsrd r, r/a, #8
    case 0x61: // pcmpestri r, r/a, #8
    case 0x70: // pshufd r, r/a, #8
    case 0x73: // psrldq r, #8
    case 0x1f: // evpcmpd/evpcmpq
    case 0x3f: // evpcmpb/evpcmpw
      tail_size = 1;  // the imm8
      break;
    default:
      break;
    }
    ip++; // skip opcode
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
  case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
  case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
  case 0xDD: // fld_d a; fst_d a; fstp_d a
  case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
  case 0xDF: // fild_d a; fistp_d a
  case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
  case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
  case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
    debug_only(has_disp32 = true);
    break;

  case 0xE8: // call rdisp32
  case 0xE9: // jmp  rdisp32
    if (which == end_pc_operand)  return ip + 4;
    assert(which == call32_operand, "call has no disp32 or imm");
    return ip;

  case 0xF0: // Lock
    goto again_after_prefix;

  case 0xF3: // For SSE
  case 0xF2: // For SSE2
    switch (0xFF & *ip++) {
    case REX:
    case REX_B:
    case REX_X:
    case REX_XB:
    case REX_R:
    case REX_RB:
    case REX_RX:
    case REX_RXB:
    case REX_W:
    case REX_WB:
    case REX_WX:
    case REX_WXB:
    case REX_WR:
    case REX_WRB:
    case REX_WRX:
    case REX_WRXB:
      NOT_LP64(assert(false, "found 64bit prefix"));
      ip++;
    default:
      ip++;
    }
    debug_only(has_disp32 = true); // has both kinds of operands!
    break;

  default:
    ShouldNotReachHere();

#undef REP8
#undef REP16
  }

  assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
#ifdef _LP64
  assert(which != imm_operand, "instruction is not a movq reg, imm64");
#else
  // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
  assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
#endif // LP64
  assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");

  // parse the output of emit_operand
  int op2 = 0xFF & *ip++;
  int base = op2 & 0x07;
  int op3 = -1;
  const int b100 = 4;
  const int b101 = 5;
  if (base == b100 && (op2 >> 6) != 3) {
    op3 = 0xFF & *ip++;
    base = op3 & 0x07;   // refetch the base
  }
  // now ip points at the disp (if any)

  switch (op2 >> 6) {
  case 0:
    // [00 reg  100][ss index base]
    // [00 reg  100][00   100  esp]
    // [00 reg base]
    // [00 reg  100][ss index  101][disp32]
    // [00 reg  101]               [disp32]
    if (base == b101) {
      if (which == disp32_operand)
        return ip;              // caller wants the disp32
      ip += 4;                  // skip the disp32
    }
    break;

  case 1:
    // [01 reg  100][ss index base][disp8]
    // [01 reg  100][00   100  esp][disp8]
    // [01 reg base]               [disp8]
    ip += 1;                    // skip the disp8
    break;

  case 2:
    // [10 reg  100][ss index base][disp32]
    // [10 reg  100][00   100  esp][disp32]
    // [10 reg base]               [disp32]
    if (which == disp32_operand)
      return ip;                // caller wants the disp32
    ip += 4;                    // skip the disp32
    break;

  case 3:
    // [11 reg base]  (not a memory addressing mode)
    break;
  }

  if (which == end_pc_operand) {
    return ip + tail_size;
  }

#ifdef _LP64
  assert(which == narrow_oop_operand && !is_64bit, "instruction is not a movl adr, imm32");
#else
  assert(which == imm_operand, "instruction has only an imm field");
#endif // LP64
  return ip;
}
// Emit a direct near call (0xE8) to `entry` with the given relocation.
// Fix: the extraction had fused `(unsigned char)` into `(unsignedchar)`,
// which does not compile.
void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
  InstructionMark im(this);
  emit_int8((unsigned char)0xE8);
  intptr_t disp = entry - (pc() + sizeof(int32_t));
  // Entry is NULL in case of a scratch emit.
  assert(entry == NULL || is_simm32(disp), "disp=" INTPTR_FORMAT " must be 32bit offset (call2)", disp);
  // Technically, should use call32_operand, but this format is
  // implied by the fact that we're emitting a call instruction.
  int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
  emit_data((int) disp, rspec, operand);
}
// The 32-bit cmpxchg compares the value at adr with the contents of rax, // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. // The ZF is set if the compared values were equal, and cleared otherwise. void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
InstructionMark im(this);
prefix(adr, reg);
emit_int16(0x0F, (unsignedchar)0xB1);
emit_operand(reg, adr, 0);
}
// The 8-bit cmpxchg compares the value at adr with the contents of rax, // and stores reg into adr if so; otherwise, the value at adr is loaded into rax,. // The ZF is set if the compared values were equal, and cleared otherwise. void Assembler::cmpxchgb(Register reg, Address adr) { // cmpxchg
InstructionMark im(this);
prefix(adr, reg, true);
emit_int16(0x0F, (unsignedchar)0xB0);
emit_operand(reg, adr, 0);
}
// comisd: compare scalar double, setting EFLAGS (66 0F 2F /r).
// Fix: removed a stray empty statement (`;;`) left by the extraction.
void Assembler::comisd(XMMRegister dst, Address src) {
  // NOTE: dbx seems to decode this as comiss even though the
  // 0x66 is there. Strangely ucomisd comes out correct
  NOT_LP64(assert(VM_Version::supports_sse2(), ""));
  InstructionMark im(this);
  InstructionAttr attributes(AVX_128bit, /* rex_w */ VM_Version::supports_evex(), /* legacy_mode */ false, /* no_mask_reg */ true, /* uses_vl */ false);
  attributes.set_address_attributes(/* tuple_type */ EVEX_T1S, /* input_size_in_bits */ EVEX_64bit);
  attributes.set_rex_vex_w_reverted();
  simd_prefix(dst, xnoreg, src, VEX_SIMD_66, VEX_OPCODE_0F, &attributes);
  emit_int8(0x2F);
  emit_operand(dst, src, 0);
}
// Opcode / Instruction Op / En 64 - Bit Mode Compat / Leg Mode Description Implemented // F2 0F 38 F0 / r CRC32 r32, r / m8 RM Valid Valid Accumulate CRC32 on r / m8. v // F2 REX 0F 38 F0 / r CRC32 r32, r / m8* RM Valid N.E. Accumulate CRC32 on r / m8. - // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E. Accumulate CRC32 on r / m8. - // // F2 0F 38 F1 / r CRC32 r32, r / m16 RM Valid Valid Accumulate CRC32 on r / m16. v // // F2 0F 38 F1 / r CRC32 r32, r / m32 RM Valid Valid Accumulate CRC32 on r / m32. v // // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E. Accumulate CRC32 on r / m64. v void Assembler::crc32(Register crc, Register v, int8_t sizeInBytes) {
assert(VM_Version::supports_sse4_2(), "");
int8_t w = 0x01;
Prefix p = Prefix_EMPTY;
emit_int8((unsignedchar)0xF2); switch (sizeInBytes) { case 1:
w = 0; break; case 2: case 4: break;
LP64_ONLY(case 8:) // This instruction is not valid in 32 bits // Note: // http://www.intel.com/content/dam/www/public/us/en/documents/manuals/64-ia-32-architectures-software-developer-instruction-set-reference-manual-325383.pdf // // Page B - 72 Vol. 2C says // qwreg2 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : 11 qwreg1 qwreg2 // mem64 to qwreg 1111 0010 : 0100 1R0B : 0000 1111 : 0011 1000 : 1111 0000 : mod qwreg r / m // F0!!! // while 3 - 208 Vol. 2A // F2 REX.W 0F 38 F1 / r CRC32 r64, r / m64 RM Valid N.E.Accumulate CRC32 on r / m64. // // the 0 on a last bit is reserved for a different flavor of this instruction : // F2 REX.W 0F 38 F0 / r CRC32 r64, r / m8 RM Valid N.E.Accumulate CRC32 on r / m8.
p = REX_W; break; default:
assert(0, "Unsupported value for a sizeInBytes argument"); break;
}
LP64_ONLY(prefix(crc, v, p);)
emit_int32(0x0F,
0x38,
0xF0 | w,
0xC0 | ((crc->encoding() & 0x7) << 3) | (v->encoding() & 7));
}
void Assembler::crc32(Register crc, Address adr, int8_t sizeInBytes) {
assert(VM_Version::supports_sse4_2(), "");
InstructionMark im(this);
int8_t w = 0x01;
Prefix p = Prefix_EMPTY;
emit_int8((int8_t)0xF2); switch (sizeInBytes) { case 1:
w = 0; break; case 2: case 4: break;
LP64_ONLY(case 8:) // This instruction is not valid in 32 bits
p = REX_W; break; default:
assert(0, "Unsupported value for a sizeInBytes argument"); break;
--> --------------------
--> maximum size reached
--> --------------------
¤ Dauer der Verarbeitung: 0.43 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.