/* * Copyright (c) 2008, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Introduced AddressLiteral and its subclasses to ease portability from // x86 and avoid relocation issues class AddressLiteral {
RelocationHolder _rspec; // Typically we use AddressLiterals we want to use their rval // However in some situations we want the lval (effect address) of the item. // We provide a special factory for making those lvals. bool _is_lval;
address _target;
private: static relocInfo::relocType reloc_for_target(address target) { // Used for ExternalAddress or when the type is not specified // Sometimes ExternalAddress is used for values which aren't // exactly addresses, like the card table base. // external_word_type can't be used for values in the first page // so just skip the reloc in that case. return external_word_Relocation::can_be_relocated(target) ? relocInfo::external_word_type : relocInfo::none;
}
// Inlined constants, for use with ldr_literal / bind_literal // Note: InlinedInteger not supported (use move_slow(Register,int[,cond])) class InlinedLiteral: StackObj { public:
Label label; // need to be public for direct access with &
InlinedLiteral() {
}
};
// Inlined Metadata* constant, emitted into the code stream.
// NOTE(review): the remainder of this class (constructor, accessor) is not
// visible in this chunk; only the field declaration is shown here.
class InlinedMetadata: public InlinedLiteral { private:
// The metadata pointer to be emitted as an inlined literal.
Metadata *_data;
// NOTE(review): this constructor is named InlinedAddress, which does not match
// the last visible enclosing class (InlinedMetadata); the InlinedAddress class
// header appears to be missing from this chunk — confirm against the full file.
// Metadata* must not be emitted through an InlinedAddress literal; this
// private overload traps at runtime to force callers onto the proper path.
InlinedAddress(Metadata *data): InlinedLiteral(), _literal((address)data, relocInfo::metadata_type) {
ShouldNotReachHere(); // use InlinedMetadata or mov_metadata
}
// Constructs an inlined address literal with an explicit relocation spec.
// Oops and metadata are excluded here: they have dedicated literal paths
// (see the asserts below).
InlinedAddress(address target, const RelocationHolder &rspec): InlinedLiteral(), _literal(target, rspec) {
assert(rspec.type() != relocInfo::oop_type, "Do not use InlinedAddress for oops");
assert(rspec.type() != relocInfo::metadata_type, "Do not use InlinedAddress for metadatas");
}
// Convenience overload taking a raw relocation type instead of a
// RelocationHolder; same oop/metadata restrictions as the rspec overload.
InlinedAddress(address target, relocInfo::relocType rtype): InlinedLiteral(), _literal(target, rtype) {
assert(rtype != relocInfo::oop_type, "Do not use InlinedAddress for oops");
assert(rtype != relocInfo::metadata_type, "Do not use InlinedAddress for metadatas");
}
// Constructs an inlined address literal with no relocation information.
// Note: default is relocInfo::none for InlinedAddress
InlinedAddress(address target): InlinedLiteral(), _literal(target, relocInfo::none) {
}
class MacroAssembler: public Assembler { protected:
// Support for VM calls //
// This is the base routine called by the different versions of call_VM_leaf. void call_VM_leaf_helper(address entry_point, int number_of_arguments);
// This is the base routine called by the different versions of call_VM. The interpreter // may customize this version by overriding it for its purposes (e.g., to save/restore // additional registers when doing a VM call). virtualvoid call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions); public:
// These routines should emit JVMTI PopFrame and ForceEarlyReturn handling code. // The implementation is only non-empty for the InterpreterMacroAssembler, // as only the interpreter handles PopFrame and ForceEarlyReturn requests. virtualvoid check_and_handle_popframe() {} virtualvoid check_and_handle_earlyret() {}
// By default, we do not need relocation information for non // patchable absolute addresses. However, when needed by some // extensions, ignore_non_patchable_relocations can be modified, // returning false to preserve all relocation information. inlinebool ignore_non_patchable_relocations() { returntrue; }
void align(int modulus);
// Support for VM calls // // It is imperative that all calls into the VM are handled via the call_VM methods. // They make sure that the stack linkage is setup correctly. call_VM's correspond // to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
// The following methods are required by templateTable.cpp, // but not used on ARM. void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true); void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true); void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true); void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
// Note: The super_call_VM calls are not used on ARM
// Raw call, without saving/restoring registers, exception handling, etc. // Mainly used from various stubs. // Note: if 'save_R9_if_scratched' is true, call_VM may on some // platforms save values on the stack. Set it to false (and handle // R9 in the callers) if the top of the stack must not be modified // by call_VM. void call_VM(address entry_point, bool save_R9_if_scratched);
// Always sets/resets sp, which default to SP if (last_sp == noreg) // Optionally sets/resets fp (use noreg to avoid setting it) // Optionally sets/resets pc depending on save_last_java_pc flag // Note: when saving PC, set_last_Java_frame returns PC's offset in the code section // (for oop_maps offset computation) int set_last_Java_frame(Register last_sp, Register last_fp, bool save_last_java_pc, Register tmp); void reset_last_Java_frame(Register tmp); // status set in set_last_Java_frame for reset_last_Java_frame bool _fp_saved; bool _pc_saved;
// Test sub_klass against super_klass, with fast and slow paths.
// The fast path produces a tri-state answer: yes / no / maybe-slow. // One of the three labels can be NULL, meaning take the fall-through. // No registers are killed, except temp_regs. void check_klass_subtype_fast_path(Register sub_klass, Register super_klass, Register temp_reg, Register temp_reg2,
Label* L_success,
Label* L_failure,
Label* L_slow_path);
// The rest of the type check; must be wired to a corresponding fast path. // It does not repeat the fast path logic, so don't use it standalone. // temp_reg3 can be noreg, if no temps are available. // Updates the sub's secondary super cache as necessary. // If set_cond_codes: // - condition codes will be Z on success, NZ on failure. // - temp_reg will be 0 on success, non-0 on failure void check_klass_subtype_slow_path(Register sub_klass, Register super_klass, Register temp_reg, Register temp_reg2, Register temp_reg3, // auto assigned if noreg
Label* L_success,
Label* L_failure, bool set_cond_codes = false);
// Simplified, combined version, good for typical uses. // temp_reg3 can be noreg, if no temps are available. It is used only on slow path. // Falls through on failure. void check_klass_subtype(Register sub_klass, Register super_klass, Register temp_reg, Register temp_reg2, Register temp_reg3, // auto assigned on slow path if noreg
Label& L_success);
// Returns address of receiver parameter, using tmp as base register. tmp and params_count can be the same.
Address receiver_argument_address(Register params_base, Register params_count, Register tmp);
void _verify_oop(Register reg, constchar* s, constchar* file, int line); void _verify_oop_addr(Address addr, constchar * s, constchar* file, int line);
// TODO: verify method and klass metadata (compare against vptr?) void _verify_method_ptr(Register reg, constchar * msg, constchar * file, int line) {} void _verify_klass_ptr(Register reg, constchar * msg, constchar * file, int line) {}
// Puts address of allocated object into register `obj` and end of allocated object into register `obj_end`. void tlab_allocate(Register obj, Register obj_end, Register tmp1,
RegisterOrConstant size_expression, Label& slow_case);
void breakpoint(AsmCondition cond = al); void stop(constchar* msg); // prints msg and continues void warn(constchar* msg); void unimplemented(constchar* what = ""); void should_not_reach_here() { stop("should not reach here"); } staticvoid debug(constchar* msg, const intx* registers);
// Create a walkable frame to help tracking down who called this code. // Returns the frame size in words. int should_not_call_this() {
raw_push(FP, LR);
should_not_reach_here();
flush(); return 2; // frame_size_in_words (FP+LR)
}
int save_all_registers(); void restore_all_registers(); int save_caller_save_registers(); void restore_caller_save_registers();
// add_slow and mov_slow are used to manipulate offsets larger than 1024, // these functions are not expected to handle all possible constants, // only those that can really occur during compilation void add_slow(Register rd, Register rn, int c); void sub_slow(Register rd, Register rn, int c);
void patchable_mov_oop(Register rd, jobject o, int oop_index) {
mov_oop(rd, o, oop_index);
} void mov_oop(Register rd, jobject o, int index = 0, AsmCondition cond = al);
void patchable_mov_metadata(Register rd, Metadata* o, int index) {
mov_metadata(rd, o, index);
} void mov_metadata(Register rd, Metadata* o, int index = 0);
// Note: this variant of mov_address assumes the address moves with // the code. Do *not* implement it with non-relocated instructions, // unless PC-relative. void mov_relative_address(Register rd, address addr, AsmCondition cond = al) { int offset = addr - pc() - 8;
assert((offset & 3) == 0, "bad alignment"); if (offset >= 0) {
assert(AsmOperand::is_rotated_imm(offset), "addr too far");
add(rd, PC, offset, cond);
} else {
assert(AsmOperand::is_rotated_imm(-offset), "addr too far");
sub(rd, PC, -offset, cond);
}
}
// Runtime address that may vary from one execution to another. // Warning: do not implement as a PC relative address. void mov_address(Register rd, address addr) {
mov_address(rd, addr, RelocationHolder::none);
}
// rspec can be RelocationHolder::none (for ignored symbolic Relocation). // In that case, the address is absolute and the generated code need // not be relocable. void mov_address(Register rd, address addr, RelocationHolder const& rspec) {
assert(rspec.type() != relocInfo::runtime_call_type, "do not use mov_address for runtime calls");
assert(rspec.type() != relocInfo::static_call_type, "do not use mov_address for relocable calls"); if (rspec.type() == relocInfo::none) { // absolute address, relocation not needed
mov_slow(rd, (intptr_t)addr); return;
} if (VM_Version::supports_movw()) {
relocate(rspec); int c = (int)addr;
movw(rd, c & 0xffff); if ((unsignedint)c >> 16) {
movt(rd, (unsignedint)c >> 16);
} return;
}
Label skip_literal;
InlinedAddress addr_literal(addr, rspec);
ldr_literal(rd, addr_literal);
b(skip_literal);
bind_literal(addr_literal);
bind(skip_literal);
}
// Note: Do not define mov_address for a Label // // Load from addresses potentially within the code are now handled // InlinedLiteral subclasses (to allow more flexibility on how the // ldr_literal is performed).
void ldr_literal(Register rd, InlinedString& L) { constchar* msg = L.msg(); if (code()->consts()->contains((address)msg)) { // string address moves with the code
ldr(rd, Address(PC, ((address)msg) - pc() - 8)); return;
} // Warning: use external strings with care. They are not relocated // if the code moves. If needed, use code_string to move them // to the consts section.
ldr(rd, Address(PC, target(L.label) - pc() - 8));
}
// Loads the metadata literal bound by bind_literal(InlinedMetadata&), PC-relative.
void ldr_literal(Register rd, InlinedMetadata& L) { // relocation done in the bind_literal for metadatas
ldr(rd, Address(PC, target(L.label) - pc() - 8));
}
void bind_literal(InlinedAddress& L) {
bind(L.label);
assert(L.rspec().type() != relocInfo::metadata_type, "Must use InlinedMetadata"); // We currently do not use oop 'bound' literals. // If the code evolves and the following assert is triggered, // we need to implement InlinedOop (see InlinedMetadata).
assert(L.rspec().type() != relocInfo::oop_type, "Inlined oops not supported"); // Note: relocation is handled by relocate calls in ldr_literal
AbstractAssembler::emit_address((address)L.target());
}
void bind_literal(InlinedString& L) { constchar* msg = L.msg(); if (code()->consts()->contains((address)msg)) { // The Label should not be used; avoid binding it // to detect errors. return;
}
bind(L.label);
AbstractAssembler::emit_address((address)L.msg());
}
// If <cond> holds, compares r1 and r2. Otherwise, flags are set so that <cond> does not hold. void cond_cmp(Register r1, Register r2, AsmCondition cond) {
cmp(r1, r2, cond);
}
// If <cond> holds, compares r and imm. Otherwise, flags are set so that <cond> does not hold. void cond_cmp(Register r, int imm, AsmCondition cond) {
cmp(r, imm, cond);
}
// Branches to L if the 32-bit value in r is negative (sign bit set).
void branch_if_negative_32(Register r, Label& L) {
// TODO: This function and branch_if_any_negative_32 could possibly
// be revised after the aarch64 removal.
// tbnz is not used instead of tst & b.mi because destination may be out of tbnz range (+-32KB)
// since these methods are used in LIR_Assembler::emit_arraycopy() to jump to stub entry.
tst_32(r, r);
b(L, mi);
}
// address_placeholder_instruction is invalid instruction and is used // as placeholder in code for address of label enum { address_placeholder_instruction = 0xFFFFFFFF };
// Emits a placeholder for the (not yet bound) address of Label L, to be
// patched once the label is bound.
void emit_address(Label& L) {
assert(!L.is_bound(), "otherwise address will not be patched");
target(L); // creates relocation which will be patched later
assert ((offset() & (wordSize-1)) == 0, "should be aligned by word size");
// NOTE(review): the body continues past this chunk — the emission of the
// placeholder word(s) and the closing brace are not visible here.
// Variable-length jump and calls. We now distinguish only the // patchable case from the other cases. Patchable must be // distinguised from relocable. Relocable means the generated code // containing the jump/call may move. Patchable means that the // targeted address may be changed later.
// Non patchable versions. // - used only for relocInfo::runtime_call_type and relocInfo::none // - may use relative or absolute format (do not use relocInfo::none // if the generated code may move) // - the implementation takes into account switch to THUMB mode if the // destination is a THUMB address // - the implementation supports far targets // // To reduce regression risk, scratch still defaults to noreg on // arm32. This results in patchable instructions. However, if // patching really matters, the call sites should be modified and // use patchable_call or patchable_jump. If patching is not required // and if a register can be cloberred, it should be explicitly // specified to allow future optimizations. void jump(address target,
relocInfo::relocType rtype = relocInfo::runtime_call_type, Register scratch = noreg, AsmCondition cond = al);
// Patchable version: // - set_destination can be used to atomically change the target // // The targets for patchable_jump and patchable_call must be in the // code cache. // [ including possible extensions of the code cache, like AOT code ] // // To reduce regression risk, scratch still defaults to noreg on // arm32. If a register can be cloberred, it should be explicitly // specified to allow future optimizations. void patchable_jump(address target,
relocInfo::relocType rtype = relocInfo::runtime_call_type, Register scratch = noreg, AsmCondition cond = al
);
// patchable_call may scratch Rtemp int patchable_call(address target,
RelocationHolder const& rspec, bool c2 = false);
#ifndef PRODUCT // Preserves flags and all registers. // On SMP the updated value might not be visible to external observers without a synchronization barrier void cond_atomic_inc32(AsmCondition cond, int* counter_addr); #endif// !PRODUCT
void pd_patch_instruction(address branch, address target, constchar* file, int line);
// Loading and storing values by size and signed-ness; // size must not exceed wordSize (i.e. 8-byte values are not supported on 32-bit ARM); // each of these calls generates exactly one load or store instruction, // so src can be pre- or post-indexed address. // 32-bit ARM variants also support conditional execution void load_sized_value(Register dst, Address src, size_t size_in_bytes, bool is_signed, AsmCondition cond = al); void store_sized_value(Register src, Address dst, size_t size_in_bytes, AsmCondition cond = al);
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.