/*
 * Copyright (c) 2020, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
#include "precompiled.hpp"
#include "prims/foreignGlobals.hpp"
#include "runtime/sharedRuntime.hpp"
#include "utilities/resourceHash.hpp"
int RegSpiller::compute_spill_area(const GrowableArray<VMStorage>& regs) {
  int result_size = 0;
  for (int i = 0; i < regs.length(); i++) {
    result_size += pd_reg_size(regs.at(i));
  }
  return result_size;
}
void RegSpiller::generate(MacroAssembler* masm, int rsp_offset, bool spill) const {
  assert(rsp_offset != -1, "rsp_offset should be set");
  int offset = rsp_offset;
  for (int i = 0; i < _regs.length(); i++) {
    VMStorage reg = _regs.at(i);
    if (spill) {
      pd_store_reg(masm, offset, reg);
    } else {
      pd_load_reg(masm, offset, reg);
    }
    offset += pd_reg_size(reg);
  }
}
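// A minimal usage sketch (illustrative only; it assumes the usual RegSpiller
// wrapper entry points spill_size_bytes(), generate_spill() and
// generate_fill() declared in the corresponding header). A stub that must
// preserve a set of registers across a clobbering call pairs the two
// directions, with the frame reserving spill_size_bytes() at spill_offset:
//
//   RegSpiller spiller(regs);                    // registers to preserve
//   spiller.generate_spill(masm, spill_offset);  // store each reg to the frame
//   // ... code that clobbers the registers, e.g. a runtime call ...
//   spiller.generate_fill(masm, spill_offset);   // reload each reg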
void ArgumentShuffle::print_on(outputStream* os) const {
  os->print_cr("Argument shuffle {");
  for (int i = 0; i < _moves.length(); i++) {
    Move move = _moves.at(i);
    VMStorage from_reg = move.from;
    VMStorage to_reg = move.to;

    os->print("Move from ");
    from_reg.print_on(os);
    os->print(" to ");
    to_reg.print_on(os);
    os->print_cr("");
  }
  os->print_cr("Stack argument bytes: %d", _out_arg_bytes);
  os->print_cr("}");
}
int NativeCallingConvention::calling_convention(const BasicType* sig_bt, VMStorage* out_regs, int num_args) const {
  int src_pos = 0;
  uint32_t max_stack_offset = 0;

  for (int i = 0; i < num_args; i++) {
    switch (sig_bt[i]) {
      case T_BOOLEAN:
      case T_CHAR:
      case T_BYTE:
      case T_SHORT:
      case T_INT:
      case T_FLOAT: {
        VMStorage reg = _input_regs.at(src_pos++);
        out_regs[i] = reg;
        if (reg.is_stack())
          max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
        break;
      }
      case T_LONG:
      case T_DOUBLE: {
        assert((i + 1) < num_args && sig_bt[i + 1] == T_VOID, "expecting half");
        VMStorage reg = _input_regs.at(src_pos++);
        out_regs[i] = reg;
        if (reg.is_stack())
          max_stack_offset = MAX2(max_stack_offset, reg.offset() + reg.stack_size());
        break;
      }
      case T_VOID: // Halves of longs and doubles
        assert(i != 0 && (sig_bt[i - 1] == T_LONG || sig_bt[i - 1] == T_DOUBLE), "expecting half");
        out_regs[i] = VMStorage::invalid();
        break;
      default:
        ShouldNotReachHere();
        break;
    }
  }
  return align_up(max_stack_offset, 8);
}
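// Worked example (illustrative; the concrete registers depend on the platform
// ABI and are assumptions here): for the signature {T_INT, T_LONG, T_VOID}
// with _input_regs = {c_rarg0, c_rarg1, ...}, the loop produces
//   out_regs[0] = c_rarg0                // the int
//   out_regs[1] = c_rarg1                // the long
//   out_regs[2] = VMStorage::invalid()   // the long's T_VOID half
// and, since no argument landed on the stack, returns align_up(0, 8) == 0
// bytes of out-argument stack space.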
int JavaCallingConvention::calling_convention(const BasicType* sig_bt, VMStorage* regs, int num_args) const {
  VMRegPair* vm_regs = NEW_RESOURCE_ARRAY(VMRegPair, num_args);
  int slots = SharedRuntime::java_calling_convention(sig_bt, vm_regs, num_args);
  for (int i = 0; i < num_args; i++) {
    VMRegPair pair = vm_regs[i];
    // Note: we ignore the second half of the pair here. The signature should
    // consist of register-sized values, so there should be no need for
    // multi-register pairs.
    //assert(!pair.first()->is_valid() || pair.is_single_reg(), "must be: %s");
    regs[i] = as_VMStorage(pair.first());
  }
  return slots << LogBytesPerInt;
}
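// Worked example: SharedRuntime::java_calling_convention reports its result
// in 4-byte stack slots, so e.g. slots == 4 becomes 4 << LogBytesPerInt == 16
// bytes of out-argument stack space.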
class ComputeMoveOrder: public StackObj {
  class MoveOperation;

  // segment_mask_or_size is not taken into account since
  // VMStorages that differ only in mask or size can still
  // conflict
  static inline unsigned hash(const VMStorage& vms) {
    return static_cast<unsigned int>(vms.type()) ^ vms.index_or_offset();
  }
  static inline bool equals(const VMStorage& a, const VMStorage& b) {
    return a.type() == b.type() && a.index_or_offset() == b.index_or_offset();
  }

  using KillerTable = ResourceHashtable<
    VMStorage, MoveOperation*,
    32, // doesn't need to be big. don't have that many argument registers (in known ABIs)
    AnyObj::RESOURCE_AREA,
    mtInternal,
    ComputeMoveOrder::hash,
    ComputeMoveOrder::equals
  >;
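  // For example (an illustration of the note above): two VMStorages naming
  // the same physical register but with different segment masks (say, its
  // 32-bit and 64-bit views) hash and compare equal here, so a move that
  // writes one is correctly treated as killing the other.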
  class MoveOperation: public ResourceObj {
    friend class ComputeMoveOrder;
   private:
    VMStorage _src;
    VMStorage _dst;
    bool _processed;
    MoveOperation* _next;
    MoveOperation* _prev;

   public:
    MoveOperation(VMStorage src, VMStorage dst) :
      _src(src), _dst(dst), _processed(false), _next(nullptr), _prev(nullptr) {}

    const VMStorage& src() const { return _src; }
    const VMStorage& dst() const { return _dst; }
    MoveOperation* next() const  { return _next; }
    MoveOperation* prev() const  { return _prev; }
    bool is_processed() const    { return _processed; }
    void set_processed()         { _processed = true; }

    // insert
    void break_cycle(VMStorage temp_register) {
      // create a new store following the last store
      // to move from the temp_register to the original
      MoveOperation* new_store = new MoveOperation(temp_register, _dst);

      // break the cycle of links and insert new_store at the end
      // break the reverse link.
      MoveOperation* p = prev();
      assert(p->next() == this, "must be");
      _prev = nullptr;
      p->_next = new_store;
      new_store->_prev = p;

      // change the original store to save its value in the temp.
      _dst = temp_register;
    }

    void link(KillerTable& killer) {
      // link this store in front of the store that it depends on
      MoveOperation** n = killer.get(_src);
      if (n != nullptr) {
        MoveOperation* src_killer = *n;
        assert(_next == nullptr && src_killer->_prev == nullptr, "shouldn't have been set yet");
        _next = src_killer;
        src_killer->_prev = this;
      }
    }

    Move as_move() {
      return {_src, _dst};
    }
  };
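  // Worked example (illustrative; R0, R1 and tmp stand for arbitrary
  // registers): for the swap { R0 -> R1, R1 -> R0 }, link() chains the two
  // moves into a cycle, since each move's source is killed by the other.
  // compute_store_order() below detects the cycle by walking prev() until it
  // arrives back at its starting edge, then calls break_cycle(tmp), which
  // rewrites the cycle into the linear chain
  //   R1 -> tmp,  R0 -> R1,  tmp -> R0
  // which, emitted front to back, preserves both values.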
 private:
  int _total_in_args;
  const VMStorage* _in_regs;
  int _total_out_args;
  const VMStorage* _out_regs;
  const BasicType* _in_sig_bt;
  VMStorage _tmp_vmreg;
  GrowableArray<MoveOperation*> _edges;
  GrowableArray<Move> _moves;
  void compute() {
    assert(_total_out_args >= _total_in_args, "can only add prefix args");
    // Note that total_out_args args can be greater than total_in_args in the case of upcalls.
    // There will be a leading MH receiver arg in the out args in that case.
    //
    // Leading args in the out args will be ignored below because we iterate from the end of
    // the register arrays until !(in_idx >= 0), and total_in_args is smaller.
    //
    // Stub code adds a move for the receiver to j_rarg0 (and potential other prefix args) manually.
    for (int in_idx = _total_in_args - 1, out_idx = _total_out_args - 1; in_idx >= 0; in_idx--, out_idx--) {
      BasicType bt = _in_sig_bt[in_idx];
      assert(bt != T_ARRAY, "array not expected");
      VMStorage in_reg = _in_regs[in_idx];
      VMStorage out_reg = _out_regs[out_idx];

      if (out_reg.is_stack() || out_reg.is_frame_data()) {
        // Move operations where the dest is the stack can all be
        // scheduled first since they can't interfere with the other moves.
        // The input and output stack spaces are distinct from each other.
        Move move{in_reg, out_reg};
        _moves.push(move);
      } else if (in_reg == out_reg
                 || bt == T_VOID) {
        // 1. Can skip non-stack identity moves.
        //
        // 2. Upper half of long or double (T_VOID).
        //    Don't need to do anything.
        continue;
      } else {
        _edges.append(new MoveOperation(in_reg, out_reg));
      }
    }
    // Break any cycles in the register moves and emit them in the
    // proper order.
    compute_store_order(_tmp_vmreg);
  }
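  // Illustrative example (the concrete registers are assumptions): given
  // in_regs = { R0, R1, R2 } and out_regs = { stack0, R1, R0 }, compute()
  //  - pushes R0 -> stack0 straight onto _moves (a stack destination can
  //    never be another move's source),
  //  - skips R1 -> R1 as an identity move,
  //  - turns R2 -> R0 into a MoveOperation edge, to be ordered (and, if it
  //    were part of a cycle, broken) by compute_store_order().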
  // Walk the edges breaking cycles between moves.  The result list
  // can be walked in order to produce the proper set of loads
  void compute_store_order(VMStorage temp_register) {
    // Record which moves kill which registers
    KillerTable killer; // a map of VMStorage -> MoveOperation*
    for (int i = 0; i < _edges.length(); i++) {
      MoveOperation* s = _edges.at(i);
      assert(!killer.contains(s->dst()),
             "multiple moves with the same register as destination");
      killer.put(s->dst(), s);
    }
    assert(!killer.contains(temp_register),
           "make sure temp isn't in the registers that are killed");

    // create links between loads and stores
    for (int i = 0; i < _edges.length(); i++) {
      _edges.at(i)->link(killer);
    }

    // at this point, all the move operations are chained together
    // in one or more doubly linked lists.  Processing them backwards finds
    // the beginning of the chain, forwards finds the end.  If there's
    // a cycle it can be broken at any point, so pick an edge and walk
    // backward until the list ends or we end where we started.
    for (int e = 0; e < _edges.length(); e++) {
      MoveOperation* s = _edges.at(e);
      if (!s->is_processed()) {
        MoveOperation* start = s;
        // search for the beginning of the chain or cycle
        while (start->prev() != nullptr && start->prev() != s) {
          start = start->prev();
        }
        if (start->prev() == s) {
          start->break_cycle(temp_register);
        }
        // walk the chain forward inserting to store list
        while (start != nullptr) {
          _moves.push(start->as_move());
          start->set_processed();
          start = start->next();
        }
      }
    }
  }
};