/*
 * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
// A VtableStub holds an individual code stub for a pair (vtable index, #args)
// for either itables or vtables.
// There's a one-to-one relationship between a VtableStub and such a pair.

// A word on VtableStub sizing:
//   Such a vtable/itable stub consists of the instance data
//   and an immediately following CodeBuffer.
// Unfortunately, the required space for the code buffer varies, depending on
// the setting of compile time macros (PRODUCT, ASSERT, ...) and of command line
// parameters. Actual data may have an influence on the size as well.
//
// A simple approximation for the VtableStub size would be to just take a value
// "large enough" for all circumstances - a worst case estimate.
// As there can exist many stubs - and they never go away - we certainly don't
// want to waste more code cache space than absolutely necessary.
//
// We need a different approach which, as far as possible, should be independent
// from or adaptive to code size variations. These variations may be caused by
// changed compile time or run time switches as well as by changed emitter code.
//
// Here is the idea:
// For the first stub we generate, we allocate a "large enough" code buffer.
// Once all instructions are emitted, we know the actual size of the stub.
// Remembering that size allows us to allocate a tightly matching code buffer
// for all subsequent stubs. That covers all "static variance", i.e. all variance
// that is due to compile time macros, command line parameters, machine capabilities,
// and other influences which are immutable for the life span of the vm.
//
// Life isn't always that easy. Code size may depend on actual data, "load constant"
// being an example for that. All code segments with such "dynamic variance" require
// additional care. We need to know or estimate the worst case code size for each
// such segment. With that knowledge, we can maintain a "slop counter" in the
// platform-specific stub emitters. It accumulates the difference between worst-case
// and actual code size. When the stub is fully generated, the actual stub size is
// adjusted (increased) by the slop counter value.
//
// As a result, we allocate all but the first code buffers with the same,
// tightly matching size.
//
// VtableStubs creates the code stubs for compiled calls through vtables.
// There is one stub per (vtable index, args_size) pair, and the stubs are
// never deallocated. They don't need to be GCed because they contain no oops.
class VtableStub;
class VtableStubs : AllStatic { public: // N must be public (some compilers need this for _table) enum {
N = 256, // size of stub table; must be power of two
mask = N - 1
};
private: friendclass VtableStub; static VtableStub* _table[N]; // table of existing stubs staticint _number_of_vtable_stubs; // number of stubs created so far (for statistics) staticint _vtab_stub_size; // current size estimate for vtable stub (quasi-constant) staticint _itab_stub_size; // current size estimate for itable stub (quasi-constant)
static VtableStub* create_vtable_stub(int vtable_index); static VtableStub* create_itable_stub(int vtable_index); static VtableStub* lookup (bool is_vtable_stub, int vtable_index); staticvoid enter (bool is_vtable_stub, int vtable_index, VtableStub* s); staticinline uint hash (bool is_vtable_stub, int vtable_index); static address find_stub (bool is_vtable_stub, int vtable_index); staticvoid bookkeeping(MacroAssembler* masm, outputStream* out, VtableStub* s,
address npe_addr, address ame_addr, bool is_vtable_stub, int index, int slop_bytes, int index_dependent_slop); staticint code_size_limit(bool is_vtable_stub); staticvoid check_and_set_size_limit(bool is_vtable_stub, int code_size, int padding);
static VtableStub* entry_point(address pc); // vtable stub entry point for a pc staticbool contains(address pc); // is pc within any stub? static VtableStub* stub_containing(address pc); // stub containing pc or NULL staticint number_of_vtable_stubs() { return _number_of_vtable_stubs; } staticvoid initialize(); staticvoid vtable_stub_do(void f(VtableStub*)); // iterates over all vtable stubs
};
class VtableStub { private: friendclass VtableStubs;
static address _chunk; // For allocation static address _chunk_end; // For allocation static VMReg _receiver_location; // Where to find receiver
VtableStub* _next; // Pointer to next entry in hash table constshort _index; // vtable index short _ame_offset; // Where an AbstractMethodError might occur short _npe_offset; // Where a NullPointerException might occur bool _is_vtable_stub; // True if vtable stub, false, is itable stub /* code follows here */ // The vtableStub code
void* operatornew(size_t size, int code_size) throw();
bool matches(bool is_vtable_stub, int index) const { return _index == index && _is_vtable_stub == is_vtable_stub;
} bool contains(address pc) const { return code_begin() <= pc && pc < code_end(); }
private: void set_exception_points(address npe_addr, address ame_addr) {
_npe_offset = npe_addr - code_begin();
_ame_offset = ame_addr - code_begin();
assert(is_abstract_method_error(ame_addr), "offset must be correct");
assert(is_null_pointer_exception(npe_addr), "offset must be correct");
assert(!is_abstract_method_error(npe_addr), "offset must be correct");
assert(!is_null_pointer_exception(ame_addr), "offset must be correct");
}
// platform-dependent routines staticint pd_code_alignment(); // CNC: Removed because vtable stubs are now made with an ideal graph // static bool pd_disregard_arg_size();
// NOTE(review): the file is truncated here — the remainder of class VtableStub
// (constructor, accessors such as code_begin()/code_end(), and the closing
// brace) is not present in this copy. The text below this point was a stray
// German web-page disclaimer, not source code; English translation kept for
// the record:
// "The information on this web page was compiled carefully, to the best of
// our knowledge. However, neither completeness, nor correctness, nor quality
// of the provided information is guaranteed. Note: the colored syntax
// display is still experimental."