/* * Copyright (c) 1999, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2014, Red Hat Inc. All rights reserved. * Copyright (c) 2020, 2022, Huawei Technologies Co., Ltd. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Load the object header (mark word).
ld(hdr, Address(obj, hdr_offset));
// ... and mark it as unlocked.
ori(hdr, hdr, markWord::unlocked_value);
// Save the unlocked object header into the displaced header location on the stack.
sd(hdr, Address(disp_hdr, 0));
// Test if the object header is still the same (i.e. unlocked), and if so, store the
// displaced header address in the object header - if it is not the same, get the
// object header instead.
la(t1, Address(obj, hdr_offset));
cmpxchgptr(hdr, disp_hdr, t1, t0, done, /*fallthrough*/NULL);
// If the object header was the same, we're done.
// If the object header was not the same, it is now in the hdr register.
// => test if it is a stack pointer into the same stack (recursive locking), i.e.:
//
// 1) (hdr & aligned_mask) == 0
// 2) sp <= hdr
// 3) hdr <= sp + page_size
//
// These 3 tests can be done by evaluating the following expression:
//
// (hdr - sp) & (aligned_mask - page_size)
//
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2.
sub(hdr, hdr, sp);
mv(t0, aligned_mask - os::vm_page_size());
andr(hdr, hdr, t0);
// For recursive locking, the result is zero => save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking).
sd(hdr, Address(disp_hdr, 0));
// Otherwise we don't care about the result and handle locking via runtime call.
bnez(hdr, slow_case, /* is_far */ true);
// done
bind(done);
// Bookkeeping: this thread now holds one more monitor.
increment(Address(xthread, JavaThread::held_monitor_count_offset()));
return null_check_offset;
}
// load displaced header
ld(hdr, Address(disp_hdr, 0)); // if the loaded hdr is NULL we had recursive locking // if we had recursive locking, we are done
beqz(hdr, done); // load object
ld(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
verify_oop(obj); // test if object header is pointing to the displaced header, and if so, restore // the displaced header in the object - if the object header is not pointing to // the displaced header, get the object header instead // if the object header was not pointing to the displaced header, // we do unlocking via runtime call if (hdr_offset) {
la(t0, Address(obj, hdr_offset));
cmpxchgptr(disp_hdr, hdr, t0, t1, done, &slow_case);
} else {
cmpxchgptr(disp_hdr, hdr, obj, t1, done, &slow_case);
} // done
bind(done);
decrement(Address(xthread, JavaThread::held_monitor_count_offset()));
}
// Initializes the header of a freshly allocated object: stores the prototype
// mark word and the (possibly compressed) klass pointer into obj.
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register tmp1, Register tmp2) {
  assert_different_registers(obj, klass, len, tmp1, tmp2);

  // This assumes that all prototype bits fit in an int32_t.
  mv(tmp1, (int32_t)(intptr_t)markWord::prototype().value());
  sd(tmp1, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (UseCompressedClassPointers) { // Take care not to kill klass
    encode_klass_not_null(tmp1, klass, tmp2);
    sw(tmp1, Address(obj, oopDesc::klass_offset_in_bytes()));
  } else {
    sd(klass, Address(obj, oopDesc::klass_offset_in_bytes()));
  }
  // NOTE(review): upstream versions of this function also initialize the array
  // length (when len->is_valid()) / the klass gap here; that code appears to
  // have been lost in this copy of the file - verify against the original sources.
}
// Initializes a freshly allocated instance object: writes the header via
// initialize_header() and zeroes the remaining body (unless the TLAB was
// pre-zeroed). con_size_in_bytes is used when the size is a compile-time
// constant; otherwise var_size_in_bytes holds the size.
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register tmp1, Register tmp2, bool is_tlab_allocated) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0, "con_size_in_bytes is not multiple of alignment");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, tmp1, tmp2);

  if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
    // Clear rest of allocated space.
    const Register index = tmp2;
    // 16: multiplier for threshold
    const int threshold = 16 * BytesPerWord; // approximate break even point for code size (see comments below)
    if (var_size_in_bytes != noreg) {
      // Runtime-sized object: zero with the generic body-initialization helper.
      mv(index, var_size_in_bytes);
      initialize_body(obj, index, hdr_size_in_bytes, tmp1);
    } else if (con_size_in_bytes <= threshold) {
      // Small constant size: use explicit null stores.
      int i = hdr_size_in_bytes;
      // Emit one odd store first if the word count is odd. 2: multiplier for BytesPerWord.
      if (i < con_size_in_bytes && (con_size_in_bytes % (2 * BytesPerWord))) {
        sd(zr, Address(obj, i));
        i += BytesPerWord;
      }
      for (; i < con_size_in_bytes; i += BytesPerWord) {
        sd(zr, Address(obj, i));
      }
    } else if (con_size_in_bytes > hdr_size_in_bytes) {
      block_comment("zero memory");
      // Large constant size: use a loop to null out the fields.
      int words = (con_size_in_bytes - hdr_size_in_bytes) / BytesPerWord;
      mv(index, words / 8); // 8: byte size

      const int unroll = 8; // Number of sd(zr) instructions we'll unroll
      int remainder = words % unroll;
      // Point t0 past the remainder stores so the unrolled loop body covers them
      // on the first (partial) iteration via the entry_point label below.
      la(t0, Address(obj, hdr_size_in_bytes + remainder * BytesPerWord));

      Label entry_point, loop;
      j(entry_point);

      bind(loop);
      sub(index, index, 1);
      for (int i = -unroll; i < 0; i++) {
        if (-i == remainder) {
          bind(entry_point); // enter mid-loop to handle the remainder stores
        }
        sd(zr, Address(t0, i * wordSize));
      }
      if (remainder == 0) {
        bind(entry_point);
      }
      add(t0, t0, unroll * wordSize);
      bnez(index, loop);
    }
  }

  // Make the initializing stores visible before the object is published.
  membar(MacroAssembler::StoreStore);

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    assert(obj == x10, "must be");
    far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
  }

  // Close out as in the parallel allocation epilogue elsewhere in this file.
  verify_oop(obj);
}
// clear rest of allocated space constRegister len_zero = len;
initialize_body(obj, arr_size, header_size * BytesPerWord, len_zero);
membar(MacroAssembler::StoreStore);
if (CURRENT_ENV->dtrace_alloc_probes()) {
assert(obj == x10, "must be");
far_call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
}
verify_oop(obj);
}
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache, Label &L) {
  verify_oop(receiver);
  // Explicit NULL check not needed since load from [klass_offset] causes a trap.
  // Check against inline cache.
  assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
  assert_different_registers(receiver, iCache, t0, t2);
  // Compare the receiver's klass against the cached klass in iCache; the branch
  // polarity of label L is defined by MacroAssembler::cmp_klass -
  // NOTE(review): presumably taken on a klass match; confirm there.
  cmp_klass(receiver, iCache, t0, t2 /* call-clobbered t2 as a tmp */, L);
}
// Builds the activation frame for a C1-compiled method after banging the
// stack to provoke any stack-overflow trap up front.
void C1_MacroAssembler::build_frame(int framesize, int bang_size_in_bytes) {
  assert(bang_size_in_bytes >= framesize, "stack bang size incorrect");
  // Make sure there is enough stack space for this method's activation.
  // Note that we do this before creating a frame.
  generate_stack_overflow_check(bang_size_in_bytes);
  MacroAssembler::build_frame(framesize);
}
void C1_MacroAssembler::verified_entry(bool breakAtEntry) {
  // If we have to make this method not-entrant we'll overwrite its
  // first instruction with a jump. For this action to be legal we
  // must ensure that this first instruction is a J, JAL or NOP.
  // Make it a NOP.
  IncompressibleRegion ir(this); // keep the nop as 4 bytes for patching.
  assert_alignment(pc());
  nop(); // 4 bytes
}
// NOTE(review): the following German website disclaimer is extraneous text that
// was appended to this copy of the source file (it is not part of the original
// sources). It is preserved here, comment-wrapped so the file remains parseable:
//   "Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
//    zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
//    noch Qualität der bereit gestellten Informationen zugesichert.
//    Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell."