/* * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2019, 2022, Arm Limited. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions.
*/
// for callee saved regs, according to the caller's ABI staticint compute_reg_save_area_size(const ABIDescriptor& abi) { int size = 0; for (int i = 0; i < Register::number_of_registers; i++) { Register reg = as_Register(i); if (reg == rfp || reg == sp) continue; // saved/restored by prologue/epilogue if (!abi.is_volatile_reg(reg)) {
size += 8; // bytes
}
}
for (int i = 0; i < FloatRegister::number_of_registers; i++) {
FloatRegister reg = as_FloatRegister(i); if (!abi.is_volatile_reg(reg)) { // Only the lower 64 bits of vector registers need to be preserved.
size += 8; // bytes
}
}
return size;
}
staticvoid preserve_callee_saved_registers(MacroAssembler* _masm, const ABIDescriptor& abi, int reg_save_area_offset) { // 1. iterate all registers in the architecture // - check if they are volatile or not for the given abi // - if NOT, we need to save it here
int offset = reg_save_area_offset;
__ block_comment("{ preserve_callee_saved_regs "); for (int i = 0; i < Register::number_of_registers; i++) { Register reg = as_Register(i); if (reg == rfp || reg == sp) continue; // saved/restored by prologue/epilogue if (!abi.is_volatile_reg(reg)) {
__ str(reg, Address(sp, offset));
offset += 8;
}
}
for (int i = 0; i < FloatRegister::number_of_registers; i++) {
FloatRegister reg = as_FloatRegister(i); if (!abi.is_volatile_reg(reg)) {
__ strd(reg, Address(sp, offset));
offset += 8;
}
}
staticvoid restore_callee_saved_registers(MacroAssembler* _masm, const ABIDescriptor& abi, int reg_save_area_offset) { // 1. iterate all registers in the architecture // - check if they are volatile or not for the given abi // - if NOT, we need to restore it here
int offset = reg_save_area_offset;
__ block_comment("{ restore_callee_saved_regs "); for (int i = 0; i < Register::number_of_registers; i++) { Register reg = as_Register(i); if (reg == rfp || reg == sp) continue; // saved/restored by prologue/epilogue if (!abi.is_volatile_reg(reg)) {
__ ldr(reg, Address(sp, offset));
offset += 8;
}
}
for (int i = 0; i < FloatRegister::number_of_registers; i++) {
FloatRegister reg = as_FloatRegister(i); if (!abi.is_volatile_reg(reg)) {
__ ldrd(reg, Address(sp, offset));
offset += 8;
}
}
// out_arg_area (for stack arguments) doubles as shadow space for native calls. // make sure it is big enough. if (out_arg_area < frame::arg_reg_save_area_bytes) {
out_arg_area = frame::arg_reg_save_area_bytes;
}
int reg_save_area_size = compute_reg_save_area_size(abi);
RegSpiller arg_spilller(call_regs._arg_regs);
RegSpiller result_spiller(call_regs._ret_regs);
int shuffle_area_offset = 0; int res_save_area_offset = shuffle_area_offset + out_arg_area; int arg_save_area_offset = res_save_area_offset + result_spiller.spill_size_bytes(); int reg_save_area_offset = arg_save_area_offset + arg_spilller.spill_size_bytes(); int frame_data_offset = reg_save_area_offset + reg_save_area_size; int frame_bottom_offset = frame_data_offset + sizeof(UpcallStub::FrameData);
StubLocations locs; int ret_buf_offset = -1; if (needs_return_buffer) {
ret_buf_offset = frame_bottom_offset;
frame_bottom_offset += ret_buf_size; // use a free register for shuffling code to pick up return // buffer address from
locs.set(StubLocations::RETURN_BUFFER, abi._scratch1);
}
int frame_size = frame_bottom_offset;
frame_size = align_up(frame_size, StackAlignmentInBytes);
// The space we have allocated will look like: // // // FP-> | | // |---------------------| = frame_bottom_offset = frame_size // | (optional) | // | ret_buf | // |---------------------| = ret_buf_offset // | | // | FrameData | // |---------------------| = frame_data_offset // | | // | reg_save_area | // |---------------------| = reg_save_are_offset // | | // | arg_save_area | // |---------------------| = arg_save_are_offset // | | // | res_save_area | // |---------------------| = res_save_are_offset // | | // SP-> | out_arg_area | needs to be at end for shadow space // //
MacroAssembler* _masm = new MacroAssembler(&buffer);
address start = __ pc();
__ enter(); // set up frame
assert((abi._stack_alignment_bytes % 16) == 0, "must be 16 byte aligned"); // allocate frame (frame_size is also aligned, so stack is still aligned)
__ sub(sp, sp, frame_size);
// we have to always spill args since we need to do a call to get the thread // (and maybe attach it).
arg_spilller.generate_spill(_masm, arg_save_area_offset);
preserve_callee_saved_registers(_masm, abi, reg_save_area_offset);
// return value shuffle if (!needs_return_buffer) { #ifdef ASSERT if (call_regs._ret_regs.length() == 1) { // 0 or 1
VMStorage j_expected_result_reg; switch (ret_type) { case T_BOOLEAN: case T_BYTE: case T_SHORT: case T_CHAR: case T_INT: case T_LONG:
j_expected_result_reg = as_VMStorage(r0); break; case T_FLOAT: case T_DOUBLE:
j_expected_result_reg = as_VMStorage(v0); break; default:
fatal("unexpected return type: %s", type2name(ret_type));
} // No need to move for now, since CallArranger can pick a return type // that goes in the same reg for both CCs. But, at least assert they are the same
assert(call_regs._ret_regs.at(0) == j_expected_result_reg, "unexpected result register");
} #endif
} else {
assert(ret_buf_offset != -1, "no return buffer allocated");
__ lea(rscratch1, Address(sp, ret_buf_offset)); int offset = 0; for (int i = 0; i < call_regs._ret_regs.length(); i++) {
VMStorage reg = call_regs._ret_regs.at(i); if (reg.type() == StorageType::INTEGER) {
__ ldr(as_Register(reg), Address(rscratch1, offset));
offset += 8;
} elseif (reg.type() == StorageType::VECTOR) {
__ ldrd(as_FloatRegister(reg), Address(rscratch1, offset));
offset += 16; // needs to match VECTOR_REG_SIZE in AArch64Architecture (Java)
} else {
ShouldNotReachHere();
}
}
}
// Native caller has no idea how to handle exceptions, // so we just crash here. Up to callee to catch exceptions.
__ verify_oop(r0);
__ movptr(rscratch1, CAST_FROM_FN_PTR(uint64_t, UpcallLinker::handle_uncaught_exception));
__ blr(rscratch1);
__ should_not_reach_here();
__ block_comment("} exception handler");
_masm->flush();
#ifndef PRODUCT
stringStream ss;
ss.print("upcall_stub_%s", entry->signature()->as_C_string()); constchar* name = _masm->code_string(ss.as_string()); #else// PRODUCT constchar* name = "upcall_stub"; #endif// PRODUCT
/* NOTE(review): the German text below is extraction residue from a website
 * disclaimer ("the information on this website was compiled to the best of
 * our knowledge; however neither completeness, nor correctness, nor quality
 * of the provided information is guaranteed. Note: the colored syntax
 * display is still experimental."). It is not part of this source file and
 * should be removed; it is comment-wrapped here so it no longer breaks
 * compilation.
 *
 * Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig
 * zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
 * noch Qualität der bereit gestellten Informationen zugesichert.
 * Bemerkung: Die farbliche Syntaxdarstellung ist noch experimentell.
 */