/* * Copyright (c) 2008, 2020, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
if (_info->deoptimize_on_exception()) {
__ call(Runtime1::entry_for(Runtime1::predicate_failed_trap_id), relocInfo::runtime_call_type);
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
debug_only(__ should_not_reach_here()); return;
} // Pass the array index on stack because all registers must be preserved
ce->verify_reserved_argument_area_size(_throw_index_out_of_bounds_exception ? 1 : 2); if (_index->is_cpu_register()) {
__ str_32(_index->as_register(), Address(SP));
} else {
__ mov_slow(Rtemp, _index->as_jint()); // Rtemp should be OK in C1
__ str_32(Rtemp, Address(SP));
}
assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF, "not enough room for call");
assert((_bytes_to_copy & 3) == 0, "must copy a multiple of four bytes");
Label call_patch; bool is_load = (_id == load_klass_id) || (_id == load_mirror_id) || (_id == load_appendix_id);
if (is_load && !VM_Version::supports_movw()) {
address start = __ pc();
// The following sequence duplicates code provided in MacroAssembler::patchable_mov_oop() // without creating relocation info entry.
assert((__ pc() - start) == patchable_instruction_offset, "should be");
__ ldr(_obj, Address(PC)); // Extra nop to handle case of large offset of oop placeholder (see NativeMovConstReg::set_data).
__ nop();
#ifdef ASSERT for (int i = 0; i < _bytes_to_copy; i++) {
assert(((address)_pc_start)[i] == start[i], "should be the same code");
} #endif// ASSERT
}
address being_initialized_entry = __ pc(); if (CommentedAssembly) {
__ block_comment(" patch template");
} if (is_load) {
address start = __ pc(); if (_id == load_mirror_id || _id == load_appendix_id) {
__ patchable_mov_oop(_obj, (jobject)Universe::non_oop_word(), _index);
} else {
__ patchable_mov_metadata(_obj, (Metadata*)Universe::non_oop_word(), _index);
} #ifdef ASSERT for (int i = 0; i < _bytes_to_copy; i++) {
assert(((address)_pc_start)[i] == start[i], "should be the same code");
} #endif// ASSERT
} else { int* start = (int*)_pc_start; int* end = start + (_bytes_to_copy / BytesPerInt); while (start < end) {
__ emit_int32(*start++);
}
}
address end_of_patch = __ pc();
int bytes_to_skip = 0; if (_id == load_mirror_id) { int offset = __ offset(); if (CommentedAssembly) {
__ block_comment(" being_initialized check");
}
assert(_obj != noreg, "must be a valid register"); // Rtemp should be OK in C1
__ ldr(Rtemp, Address(_obj, java_lang_Class::klass_offset()));
__ ldr(Rtemp, Address(Rtemp, InstanceKlass::init_thread_offset()));
__ cmp(Rtemp, Rthread);
__ b(call_patch, ne);
__ b(_patch_site_continuation);
bytes_to_skip += __ offset() - offset;
}
if (CommentedAssembly) {
__ block_comment("patch data - 3 high bytes of the word");
} constint sizeof_patch_record = 4;
bytes_to_skip += sizeof_patch_record; int being_initialized_entry_offset = __ pc() - being_initialized_entry + sizeof_patch_record;
__ emit_int32(0xff | being_initialized_entry_offset << 8 | bytes_to_skip << 16 | _bytes_to_copy << 24);
// Slow path for an implicit null check: either deoptimize or jump to the
// NullPointerException-throwing runtime stub, and record the PC mapping in
// the implicit exception table.
void ImplicitNullCheckStub::emit_code(LIR_Assembler* ce) {
  // Choose the runtime entry up front. With deoptimize_on_exception we
  // deoptimize rather than throw, because throwing the exception here is
  // probably wrong.
  address target = _info->deoptimize_on_exception()
      ? Runtime1::entry_for(Runtime1::predicate_failed_trap_id)
      : Runtime1::entry_for(Runtime1::throw_null_pointer_exception_id);

  // Map the faulting instruction's offset to this stub's current offset.
  ce->compilation()->implicit_exception_table()->append(_offset, __ offset());
  __ bind(_entry);
  __ call(target, relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  ce->verify_oop_map(_info);
  // The runtime call never returns here.
  DEBUG_ONLY(STOP("ImplicitNullCheck");)
}
// Slow path that raises the exception associated with _stub, passing the
// (optional) exception-related object to the runtime on the stack.
void SimpleExceptionStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  // The object is handed over on the stack because the runtime stub must
  // preserve all registers.
  if (!_obj->is_cpu_register()) {
    // No object operand for this stub kind.
    assert(_obj->is_illegal(), "should be");
  } else {
    ce->verify_reserved_argument_area_size(1);
    __ str(_obj->as_pointer_register(), Address(SP));
  }
  __ call(Runtime1::entry_for(_stub), relocInfo::runtime_call_type);
  ce->add_call_info_here(_info);
  // The runtime call never returns here.
  DEBUG_ONLY(STOP("SimpleException");)
}
// NOTE(review): the following German text appears to be web-page boilerplate
// (a liability disclaimer plus a remark that the color syntax highlighting is
// experimental) accidentally appended to this source file. It is not part of
// the program; it is commented out here so the file remains compilable.
// Original text (German):
//   "Die Informationen auf dieser Webseite wurden nach bestem Wissen
//    sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit,
//    noch Richtigkeit, noch Qualität der bereitgestellten Informationen
//    zugesichert.
//    Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell."
// English translation:
//   "The information on this website was carefully compiled to the best of
//    our knowledge. However, neither completeness, nor correctness, nor
//    quality of the provided information is guaranteed.
//    Note: the colored syntax display is still experimental."