/* * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, 2020, Red Hat Inc. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Locate the GOT-indirect jump inside this PLT entry.
// A virtual-call PLT entry begins with the jump itself; a static-call
// entry begins with a move (GOT load), so step over that first
// instruction to reach the jump.
address NativePltCall::plt_jump() const {
  address pc = plt_entry();
  if (!((NativeGotJump*) pc)->is_GotJump()) {
    // Static PLT code: skip the leading move to reach the jump.
    pc = nativeLoadGot_at(pc)->next_instruction_address();
  }
  return pc;
}
// Locate the GOT-load (move) instruction inside this PLT entry.
// For virtual PLT code the move is the first instruction; for static
// PLT code (from the c2i stub) it follows the GOT jump.
address NativePltCall::plt_load_got() const {
  address pc = plt_entry();
  const bool starts_with_jump = ((NativeGotJump*) pc)->is_GotJump();
  if (starts_with_jump) {
    // Static PLT code has move instruction second (from c2i stub).
    return nativeGotJump_at(pc)->next_instruction_address();
  }
  // Virtual PLT code has move instruction first.
  return pc;
}
// Return the address of the c2i stub inside this PLT entry.
address NativePltCall::plt_c2i_stub() const {
  address entry = plt_load_got();
  // This method should be called only for static calls which has C2I stub.
  // NOTE(review): 'load' looks unused, but nativeLoadGot_at() presumably
  // verifies the instruction at 'entry' in debug builds — confirm that
  // side effect before removing this line.
  NativeLoadGot* load = nativeLoadGot_at(entry);
  return entry;
}
// Redirect this PLT call to 'dest' in a multi-thread-safe way by
// storing the new target into the entry's GOT slot.
void NativePltCall::set_destination_mt_safe(address dest) {
  // rewriting the value in the GOT, it should always be aligned
  NativeGotJump* got_jump = nativeGotJump_at(plt_jump());
  address* slot = (address*) got_jump->got_address();
  *slot = dest;
}
// Do we use a trampoline stub for this call?
CodeBlob* cb = CodeCache::find_blob(addr);
assert(cb && cb->is_nmethod(), "sanity");
nmethod *nm = (nmethod *)cb; if (nm->stub_contains(destination) && is_NativeCallTrampolineStub_at(destination)) { // Yes we do, so get the destination from the trampoline stub. const address trampoline_stub_addr = destination;
destination = nativeCallTrampolineStub_at(trampoline_stub_addr)->destination();
}
return destination;
}
// Similar to replace_mt_safe, but just changes the destination. The // important thing is that free-running threads are able to execute this // call instruction at all times. // // Used in the runtime linkage of calls; see class CompiledIC. // // Add parameter assert_lock to switch off assertion // during code generation, where no patching lock is needed. void NativeCall::set_destination_mt_safe(address dest, bool assert_lock) {
assert(!assert_lock ||
(Patching_lock->is_locked() || SafepointSynchronize::is_at_safepoint()) ||
CompiledICLocker::is_safe(addr_at(0)), "concurrent code patching");
// Return the target of this jump, or -1 if the jump is still unresolved.
//
// We use jump to self as the unresolved address which the inline cache
// code (and relocs) know about.  As a special case we also use the
// sequence movptr(r, 0); br(r) — i.e. a jump to 0 — when we need to
// leave space for a wide immediate load.
address NativeJump::jump_destination() const {
  address target = MacroAssembler::target_addr_for_insn_or_null(instruction_address());
  // Map both sentinel forms (jump to self, jump to 0) to -1.
  const bool unresolved = (target == (address) this) || (target == 0);
  return unresolved ? (address) -1 : target;
}
// Patch this jump to branch to 'dest'.
//
// Fixes two defects in the previous text: it contained a 'return dest;'
// in a void function (ill-formed), and its body was a pasted-in copy of
// jump_destination()'s sentinel mapping, so the instruction was never
// actually rewritten.
void NativeJump::set_jump_destination(address dest) {
  // We use jump to self as the unresolved address which the inline
  // cache code (and relocs) know about.
  if (dest == (address) -1) {
    dest = instruction_address();
  }
  // Rewrite the branch target in place, then invalidate the
  // instruction cache over the patched range so all cores observe
  // the new code.
  MacroAssembler::pd_patch_instruction(instruction_address(), dest);
  ICache::invalidate_range(instruction_address(), instruction_size);
}
// We use jump to self as the unresolved address which the inline // cache code (and relocs) know about if (dest == (address) -1) {
dest = instruction_address();
}
// Is the marked instruction the read half of a safepoint poll?
bool NativeInstruction::is_safepoint_poll() {
  // a safepoint_poll is implemented in two steps as either
  //
  //   adrp(reg, polling_page);
  //   ldr(zr, [reg, #offset]);
  //
  // or
  //
  //   mov(reg, polling_page);
  //   ldr(zr, [reg, #offset]);
  //
  // or
  //
  //   ldr(reg, [rthread, #offset]);
  //   ldr(zr, [reg, #offset]);
  //
  // however, we cannot rely on the polling page address load always
  // directly preceding the read from the page. C1 does that but C2
  // has to do the load and read as two independent instruction
  // generation steps. that's because with a single macro sequence the
  // generic C2 code can only add the oop map before the mov/adrp and
  // the trap handler expects an oop map to be associated with the
  // load. with the load scheduled as a prior step the oop map goes
  // where it is needed.
  //
  // so all we can do here is check that marked instruction is a load
  // word to zr
  return is_ldrw_to_zr(address(this));
}
// Generate a trampoline for a branch to dest.  If there's no need for a
// trampoline, simply patch the call directly to dest.
//
// Returns the address of the trampoline stub, or NULL when the call was
// patched directly.
//
// Fixes a defect in the previous text: the function was unterminated —
// it had no return statement and no closing brace, so control fell off
// the end of an address-returning function.
address NativeCall::trampoline_jump(CodeBuffer &cbuf, address dest) {
  MacroAssembler a(&cbuf);
  // NOTE(review): 'stub' is never assigned a trampoline here, so this
  // always patches directly; the stub-emission path appears to have
  // been lost — confirm against the original implementation.
  address stub = NULL;

  if (stub == NULL) {
    // If we generated no stub, patch this call directly to dest.
    // This will happen if we don't need far branches or if there
    // already was a trampoline.
    set_destination(dest);
  }

  return stub;
}
// NOTE(review): the following stray text (a German website disclaimer)
// was pasted into this file un-commented, breaking compilation.  It is
// preserved here, translated, as a comment:
//   "The information on this website has been compiled carefully and to
//    the best of our knowledge.  However, neither completeness, nor
//    correctness, nor quality of the information provided is guaranteed.
//    Note: the colored syntax highlighting is still experimental."