/* * Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2016 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// NOTE(review): fragment — the declaration of the enclosing method (presumably
// C1_MacroAssembler::inline_cache_check) is not visible in this chunk;
// ic_hit is defined in the missing part. Confirm against the full file.
// If icache check fails, then jump to runtime routine.
// Note: RECEIVER must still contain the receiver!
load_const_optimized(Z_R1_scratch, AddressLiteral(SharedRuntime::get_ic_miss_stub()));
z_br(Z_R1_scratch);                // Unconditional branch to the IC-miss stub.
align(CodeEntryAlignment);         // Align the code that follows to CodeEntryAlignment.
bind(ic_hit);
}
// NOTE(review): fragment — the start of the enclosing locking method is not
// visible in this chunk; hdr, disp_hdr, obj, hdr_offset, done and slow_case
// are defined in the missing part. Confirm against the full file.
// ... and mark it as unlocked.
z_oill(hdr, markWord::unlocked_value);
// Save unlocked object header into the displaced header location on the stack.
z_stg(hdr, Address(disp_hdr, (intptr_t)0));
// Test if object header is still the same (i.e. unlocked), and if so, store the
// displaced header address in the object header. If it is not the same, get the
// object header instead.
z_csg(hdr, disp_hdr, hdr_offset, obj);
// If the object header was the same, we're done.
branch_optimized(Assembler::bcondEqual, done);
// If the object header was not the same, it is now in the hdr register.
// => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
//
// 1) (hdr & markWord::lock_mask_in_place) == 0
// 2) rsp <= hdr
// 3) hdr <= rsp + page_size
//
// These 3 tests can be done by evaluating the following expression:
//
// (hdr - Z_SP) & (~(page_size-1) | markWord::lock_mask_in_place)
//
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
z_sgr(hdr, Z_SP);
load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markWord::lock_mask_in_place));
z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
// For recursive locking, the result is zero. => Save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking).
z_stg(hdr, Address(disp_hdr, (intptr_t)0));
// Otherwise we don't care about the result and handle locking via runtime call.
branch_optimized(Assembler::bcondNotZero, slow_case);
// done
bind(done);
}
// NOTE(review): fragment — the start of the enclosing unlocking method is not
// visible in this chunk; hdr, disp_hdr, obj, hdr_offset, done and slow_case
// are defined in the missing part. Confirm against the full file.
// Load displaced header.
z_ltg(hdr, Address(disp_hdr, (intptr_t)0));
// If the loaded hdr is NULL we had recursive locking, and we are done.
z_bre(done);
// Load object.
z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
verify_oop(obj, FILE_AND_LINE);
// Test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object. If the object header is not pointing to
// the displaced header, get the object header instead.
z_csg(disp_hdr, hdr, hdr_offset, obj);
// If the object header was not pointing to the displaced header,
// we do unlocking via runtime call.
branch_optimized(Assembler::bcondNotEqual, slow_case);
// done
bind(done);
}
void C1_MacroAssembler::try_allocate( Register obj, // result: Pointer to object after successful allocation. Register var_size_in_bytes, // Object size in bytes if unknown at compile time; invalid otherwise. int con_size_in_bytes, // Object size in bytes if known at compile time. Register t1, // Temp register: Must be global register for incr_allocated_bytes.
Label& slow_case // Continuation point if fast allocation fails.
) { if (UseTLAB) {
tlab_allocate(obj, var_size_in_bytes, con_size_in_bytes, t1, slow_case);
} else { // Allocation in shared Eden not implemented, because sapjvm allocation trace does not allow it.
z_brul(slow_case);
}
}
// Write the object's mark word and klass field. For arrays (len is a valid
// register) additionally store the length; otherwise, with compressed class
// pointers, zero the klass gap.
void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register len, Register Rzero, Register t1) {
  assert_different_registers(obj, klass, len, t1, Rzero);

  // This assumes that all prototype bits fit in an int32_t.
  load_const_optimized(t1, (intx)markWord::prototype().value());
  z_stg(t1, Address(obj, oopDesc::mark_offset_in_bytes()));

  if (len->is_valid()) {
    // Length will be in the klass gap, if one exists.
    z_st(len, Address(obj, arrayOopDesc::length_offset_in_bytes()));
  } else if (UseCompressedClassPointers) {  // Fixed: was "elseif", which does not compile in C++.
    store_klass_gap(Rzero, obj); // Zero klass gap for compressed oops.
  }

  store_klass(klass, obj, t1);
}
// NOTE(review): fragment — the enclosing method's declaration (presumably
// C1_MacroAssembler::initialize_body(Register objectFields, Register len_in_bytes,
// Register Rzero)) and the declaration of the 'done' label are not visible in
// this chunk. Confirm against the full file.
// Initialize object fields.
// See documentation for MVCLE instruction!!!
// MVCLE works on even/odd register pairs: objectFields/len_in_bytes form the
// destination pair (enforced by the asserts below); Rzero is the odd half of
// the source pair.
assert(objectFields->encoding()%2==0, "objectFields must be an even register");
assert(len_in_bytes->encoding() == (objectFields->encoding()+1), "objectFields and len_in_bytes must be a register pair");
assert(Rzero->encoding()%2==1, "Rzero must be an odd register");
// Use Rzero as src length, then mvcle will copy nothing
// and fill the object with the padding value 0.
// as_Register(Rzero->encoding()-1) is Rzero's even partner, forming the source
// register pair; the third argument 0 is the pad byte.
move_long_ext(objectFields, as_Register(Rzero->encoding()-1), 0);
bind(done);
}
void C1_MacroAssembler::allocate_object( Register obj, // Result: pointer to object after successful allocation. Register t1, // temp register Register t2, // temp register: Must be a global register for try_allocate. int hdr_size, // object header size in words int obj_size, // object size in words Register klass, // object klass
Label& slow_case // Continuation point if fast allocation fails.
) {
assert_different_registers(obj, t1, t2, klass);
// Allocate space and initialize header.
try_allocate(obj, noreg, obj_size * wordSize, t1, slow_case);
// Clear rest of allocated space. constint threshold = 4 * BytesPerWord; if (con_size_in_bytes <= threshold) { // Use explicit null stores. // code size = 6*n bytes (n = number of fields to clear) for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
z_stg(Rzero, Address(obj, i));
} else { // Code size generated by initialize_body() is 16. Register object_fields = Z_R0_scratch; Register len_in_bytes = Z_R1_scratch;
z_la(object_fields, hdr_size_in_bytes, obj);
load_const_optimized(len_in_bytes, con_size_in_bytes - hdr_size_in_bytes);
initialize_body(object_fields, len_in_bytes, Rzero);
}
// Dtrace support is unimplemented. // if (CURRENT_ENV->dtrace_alloc_probes()) { // assert(obj == rax, "must be"); // call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id))); // }
verify_oop(obj, FILE_AND_LINE);
}
void C1_MacroAssembler::allocate_array( Register obj, // result: Pointer to array after successful allocation. Register len, // array length Register t1, // temp register Register t2, // temp register int hdr_size, // object header size in words int elt_size, // element size in bytes Register klass, // object klass
Label& slow_case // Continuation point if fast allocation fails.
) {
assert_different_registers(obj, len, t1, t2, klass);
// Determine alignment mask.
assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
// Check for negative or excessive length.
compareU64_and_branch(len, (int32_t)max_array_allocation_length, bcondHigh, slow_case);
// Compute array size. // Note: If 0 <= len <= max_length, len*elt_size + header + alignment is // smaller or equal to the largest integer. Also, since top is always // aligned, we can do the alignment here instead of at the end address // computation. constRegister arr_size = t2; switch (elt_size) { case 1: lgr_if_needed(arr_size, len); break; case 2: z_sllg(arr_size, len, 1); break; case 4: z_sllg(arr_size, len, 2); break; case 8: z_sllg(arr_size, len, 3); break; default: ShouldNotReachHere();
}
add2reg(arr_size, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment.
z_nill(arr_size, (~MinObjAlignmentInBytesMask) & 0xffff); // Align array size.
try_allocate(obj, arr_size, 0, t1, slow_case);
initialize_header(obj, klass, len, noreg, t1);
// Clear rest of allocated space.
Label done; Register object_fields = t1; Register Rzero = Z_R1_scratch;
z_aghi(arr_size, -(hdr_size * BytesPerWord));
z_bre(done); // Jump if size of fields is zero.
z_la(object_fields, hdr_size * BytesPerWord, obj);
z_xgr(Rzero, Rzero);
initialize_body(object_fields, arr_size, Rzero);
bind(done);
// Dtrace support is unimplemented. // if (CURRENT_ENV->dtrace_alloc_probes()) { // assert(obj == rax, "must be"); // call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id))); // }
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.