/*
 * Copyright (c) 1998, 2022, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#ifndef PRODUCT
#define DEBUG_ARG(x) , x
#else
#define DEBUG_ARG(x)
#endif
//------------------------------Scheduling----------------------------------
// This class contains all the information necessary to implement instruction
// scheduling and bundling.
class Scheduling {

private:
  // Arena to use
Arena *_arena;
// Control-Flow Graph info
PhaseCFG *_cfg;
// Register Allocation info
PhaseRegAlloc *_regalloc;
// Number of nodes in the method
uint _node_bundling_limit;
// List of scheduled nodes. Generated in reverse order
Node_List _scheduled;
// List of nodes currently available for choosing for scheduling
Node_List _available;
  // For each instruction beginning a bundle, the number of following
  // nodes to be bundled with it.
Bundle *_node_bundling_base;
// Mapping from register to Node
Node_List _reg_node;
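  // (Records the most recent definition or use seen for each register, so the
  //  scheduler can insert anti-dependence edges, via pinch nodes, between them.)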
// Free list for pinch nodes.
Node_List _pinch_free_list;
  // Number of uses of this node within the containing basic block.
  short *_uses;
  // Schedulable portion of current block.  Skips Region/Phi/CreateEx up
  // front, branch+proj at end.  Also skips Catch/CProj (same as
  // branch-at-end), plus just-prior exception-throwing call.
uint _bb_start, _bb_end;
  // Latency from the end of the basic block as scheduled
  unsigned short *_current_latency;
// Remember the next node
Node *_next_node;
// Use this for an unconditional branch delay slot
Node *_unconditional_delay_slot;
// Pointer to a Nop
MachNopNode *_nop;
// Length of the current bundle, in instructions
uint _bundle_instr_count;
// Current Cycle number, for computing latencies and bundling
uint _bundle_cycle_number;
// Bundle information
Pipeline_Use_Element _bundle_use_elements[resource_count];
Pipeline_Use _bundle_use;
  // Dump the available list
  void dump_available() const;

  // Add a node to the current bundle
  void AddNodeToBundle(Node *n, const Block *bb);

  // Add a node to the list of available nodes
  void AddNodeToAvailableList(Node *n);

  // Compute the local use count for the nodes in a block, and compute
  // the list of instructions with no uses in the block as available
  void ComputeUseCount(const Block *bb);
// Choose an instruction from the available list to add to the bundle
Node * ChooseNodeToBundle();
  // See if this Node fits into the currently accumulating bundle
  bool NodeFitsInBundle(Node *n);

  // Decrement the use count for a node
  void DecrementUseCounts(Node *n, const Block *bb);

  // Garbage collect pinch nodes for reuse by other blocks.
  void garbage_collect_pinch_nodes();
  // Clean up a pinch node for reuse (helper for above).
  void cleanup_pinch( Node *pinch );

  // Information for statistics gathering
#ifndef PRODUCT
private:
  // Gather information on size of nops relative to total
uint _branches, _unconditional_delays;
void C2SafepointPollStubTable::emit(CodeBuffer& cb) {
  MacroAssembler masm(&cb);
  for (int i = _safepoints.length() - 1; i >= 0; i--) {
    // Make sure there is enough space in the code buffer
    if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == NULL) {
      ciEnv::current()->record_failure("CodeCache is full");
      return;
}
int C2SafepointPollStubTable::estimate_stub_size() const {
  if (_safepoints.length() == 0) {
    return 0;
}
int result = stub_size_lazy() * _safepoints.length();
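  // Debug-only cross-check: re-emit every stub into the scratch buffer and
  // verify that the measured size agrees with the per-stub estimate above.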
#ifdef ASSERT
Compile* const C = Compile::current();
  BufferBlob* const blob = C->output()->scratch_buffer_blob();
  int size = 0;
for (int i = _safepoints.length() - 1; i >= 0; i--) {
CodeBuffer cb(blob->content_begin(), C->output()->scratch_buffer_code_size());
MacroAssembler masm(&cb);
C2SafepointPollStub* entry = _safepoints.at(i);
emit_stub(masm, entry);
size += cb.insts_size();
}
  assert(size == result, "stubs should not have variable size");
#endif
return result;
}
// Nmethod entry barrier stubs
C2EntryBarrierStub* C2EntryBarrierStubTable::add_entry_barrier() {
assert(_stub == NULL, "There can only be one entry barrier stub");
  _stub = new (Compile::current()->comp_arena()) C2EntryBarrierStub();
  return _stub;
}
void C2EntryBarrierStubTable::emit(CodeBuffer& cb) {
  if (_stub == NULL) {
    // No stub - nothing to do
    return;
}
  C2_MacroAssembler masm(&cb);
  // Make sure there is enough space in the code buffer
  if (cb.insts()->maybe_expand_to_ensure_remaining(PhaseOutput::MAX_inst_size) && cb.blob() == NULL) {
    ciEnv::current()->record_failure("CodeCache is full");
    return;
}
intptr_t before = masm.offset();
masm.emit_entry_barrier_stub(_stub);
  intptr_t after = masm.offset();
  int actual_size = (int)(after - before);
  int expected_size = masm.entry_barrier_stub_size();
assert(actual_size == expected_size, "Estimated size is wrong, expected %d, was %d", expected_size, actual_size);
}
int C2EntryBarrierStubTable::estimate_stub_size() const {
  if (BarrierSet::barrier_set()->barrier_set_nmethod() == NULL) {
    // No nmethod entry barrier?
    return 0;
}
void PhaseOutput::perform_mach_node_analysis() {
  // Late barrier analysis must be done after schedule and bundle
  // Otherwise liveness based spilling will fail
BarrierSetC2* bs = BarrierSet::barrier_set()->barrier_set_c2();
  bs->late_barrier_analysis();
}

// Convert Nodes to instruction bits and pass off to the VM
void PhaseOutput::Output() {
  // RootNode goes
assert( C->cfg()->get_root_block()->number_of_nodes() == 0, "" );
  // The number of new nodes (mostly MachNop) is proportional to
  // the number of java calls and inner loops which are aligned.
  if ( C->check_node_count((NodeLimitFudgeFactor + C->java_calls()*3 +
                            C->inner_loops()*(OptoLoopAlignment-1)), "out of nodes before code generation" ) ) {
    return;
  }

  // Make sure I can find the Start Node
Block *entry = C->cfg()->get_block(1);
  Block *broot = C->cfg()->get_root_block();
  Node *start = entry->head(); // the method's StartNode; it is replaced by the prolog and unmapped below
// Replace StartNode with prolog
MachPrologNode *prolog = new MachPrologNode();
entry->map_node(prolog, 0);
C->cfg()->map_node_to_block(prolog, entry);
C->cfg()->unmap_node_from_block(start); // start is no longer in any block
// Virtual methods need an unverified entry point
  if( C->is_osr_compilation() ) {
    if( PoisonOSREntry ) {
      // TODO: Should use a ShouldNotReachHereNode...
C->cfg()->insert( broot, 0, new MachBreakpointNode() );
}
  } else {
    if( C->method() && !C->method()->flags().is_static() ) {
      // Insert unvalidated entry point
C->cfg()->insert( broot, 0, new MachUEPNode() );
}
}
  // Break before main entry point
  if ((C->method() && C->directive()->BreakAtExecuteOption) ||
(OptoBreakpoint && C->is_method_compilation()) ||
(OptoBreakpointOSR && C->is_osr_compilation()) ||
      (OptoBreakpointC2R && !C->method()) ) {
    // checking for C->method() means that OptoBreakpoint does not apply to
    // runtime stubs or frame converters
C->cfg()->insert( entry, 1, new MachBreakpointNode() );
}
  // Insert epilogs before every return
  for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
    Block* block = C->cfg()->get_block(i);
    if (!block->is_connector() && block->non_connector_successor(0) == C->cfg()->get_root_block()) {
      // Found a program exit point?
      Node* m = block->end();
      if (m->is_Mach() && m->as_Mach()->ideal_Opcode() != Op_Halt) {
MachEpilogNode* epilog = new MachEpilogNode(m->as_Mach()->ideal_Opcode() == Op_Return);
block->add_inst(epilog);
C->cfg()->map_node_to_block(epilog, block);
}
}
}
// Keeper of sizing aspects
_buf_sizes = BufferSizingData();
// Initialize code buffer
  estimate_buffer_size(_buf_sizes._const);
  if (C->failing()) return;

  // Pre-compute the length of blocks and replace
  // long branches with short if machine supports it.
  // Must be done before ScheduleAndBundle due to SPARC delay slots
uint* blk_starts = NEW_RESOURCE_ARRAY(uint, C->cfg()->number_of_blocks() + 1);
blk_starts[0] = 0;
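  // blk_starts[i] is the (pessimistic) byte offset of block i from the start of
  // the code; after sizing, the final entry holds the estimated total code size.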
shorten_branches(blk_starts);
  ScheduleAndBundle();
  if (C->failing()) {
    return;
}
perform_mach_node_analysis();
// Complete sizing of codebuffer
  CodeBuffer* cb = init_buffer();
  if (cb == NULL || C->failing()) {
    return;
}
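  // Compute, for every safepoint, which registers and stack slots hold oops.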
BuildOopMaps();
  if (C->failing()) {
    return;
}
fill_buffer(cb, blk_starts);
}
bool PhaseOutput::need_stack_bang(int frame_size_in_bytes) const {
  // Determine if we need to generate a stack overflow check.
  // Do it if the method is not a stub function and
  // has java calls or has frame size > vm_page_size/8.
  // The debug VM checks that deoptimization doesn't trigger an
  // unexpected stack overflow (compiled method stack banging should
  // guarantee it doesn't happen) so we always need the stack bang in
  // a debug VM.
  return (C->stub_function() == NULL &&
          (C->has_java_calls() || frame_size_in_bytes > os::vm_page_size()>>3
           DEBUG_ONLY(|| true)));
}
bool PhaseOutput::need_register_stack_bang() const {
  // Determine if we need to generate a register stack overflow check.
  // This is only used on architectures which have split register
  // and memory stacks (ie. IA64).
  // Bang if the method is not a stub function and has java calls
  return (C->stub_function() == NULL && C->has_java_calls());
}
// Compute the size of first NumberOfLoopInstrToAlign instructions at the top
// of a loop. When aligning a loop we need to provide enough instructions
// in cpu's fetch buffer to feed decoders. The loop alignment could be
// avoided if we have enough instructions in fetch buffer at the head of a loop.
// By default, the size is set to 999999 by Block's constructor so that
// a loop will be aligned if the size is not reset here.
//
// Note: Mach instructions could contain several HW instructions
// so the size is estimated only.
//
void PhaseOutput::compute_loop_first_inst_sizes() {
  // The next condition is used to gate the loop alignment optimization.
  // Don't align a loop if there are enough instructions at the head of a loop
  // or alignment padding is larger than MaxLoopPad. By default, MaxLoopPad
  // is equal to OptoLoopAlignment-1 except on new Intel cpus, where it is
  // equal to 11 bytes which is the largest address NOP instruction.
  if (MaxLoopPad < OptoLoopAlignment - 1) {
    uint last_block = C->cfg()->number_of_blocks() - 1;
    for (uint i = 1; i <= last_block; i++) {
      Block* block = C->cfg()->get_block(i);
      // Check the first loop's block which requires an alignment.
      if (block->loop_alignment() > (uint)relocInfo::addr_unit()) {
uint sum_size = 0;
uint inst_cnt = NumberOfLoopInstrToAlign;
inst_cnt = block->compute_first_inst_size(sum_size, inst_cnt, C->regalloc());
        // Check subsequent fallthrough blocks if the loop's first
        // block(s) does not have enough instructions.
        Block *nb = block;
        while(inst_cnt > 0 &&
i < last_block &&
!C->cfg()->get_block(i + 1)->has_loop_alignment() &&
!nb->has_successor(block)) {
i++;
nb = C->cfg()->get_block(i);
inst_cnt = nb->compute_first_inst_size(sum_size, inst_cnt, C->regalloc());
} // while( inst_cnt > 0 && i < last_block )
// The architecture description provides short branch variants for some long
// branch instructions. Replace eligible long branches with short branches.
void PhaseOutput::shorten_branches(uint* blk_starts) {

  // Initialize the sizes to 0
  int code_size  = 0;          // Size in bytes of generated code
  int stub_size  = 0;          // Size in bytes of all stub entries
  // Size in bytes of all relocation entries, including those in local stubs.
  // Start with 2-bytes of reloc info for the unvalidated entry point
  int reloc_size = 1;          // Number of relocation entries

  // Make three passes.  The first computes pessimistic blk_starts,
  // relative jmp_offset and reloc_size information. The second performs
  // short branch substitution using the pessimistic sizing. The
  // third inserts nops where needed.
// Step one, perform a pessimistic sizing pass.
uint last_call_adr = max_juint;
uint last_avoid_back_to_back_adr = max_juint;
  uint nop_size = (new MachNopNode())->size(C->regalloc());
  for (uint i = 0; i < nblocks; i++) { // For all blocks
Block* block = C->cfg()->get_block(i);
_block = block;
    // During short branch replacement, we store the relative (to blk_starts)
    // offset of jump in jmp_offset, rather than the absolute offset of jump.
    // This is so that we do not need to recompute sizes of all nodes when
    // we compute correct blk_starts in our next sizing pass.
jmp_offset[i] = 0;
jmp_size[i] = 0;
jmp_nidx[i] = -1;
DEBUG_ONLY( jmp_target[i] = 0; )
DEBUG_ONLY( jmp_rule[i] = 0; )
// Sum all instruction sizes to compute block size
uint last_inst = block->number_of_nodes();
    uint blk_size = 0;
    for (uint j = 0; j < last_inst; j++) {
_index = j;
      Node* nj = block->get_node(_index);

      // Handle machine instruction nodes
      if (nj->is_Mach()) {
MachNode* mach = nj->as_Mach();
blk_size += (mach->alignment_required() - 1) * relocInfo::addr_unit(); // assume worst case padding
        reloc_size += mach->reloc();
        if (mach->is_MachCall()) {
          // add size information for trampoline stub
          // class CallStubImpl is platform-specific and defined in the *.ad files.
stub_size += CallStubImpl::size_call_trampoline();
reloc_size += CallStubImpl::reloc_call_trampoline();
          MachCallNode *mcall = mach->as_MachCall();
          // This destination address is NOT PC-relative
if (mcall->is_MachCallJava() && mcall->as_MachCallJava()->_method) {
stub_size += CompiledStaticCall::to_interp_stub_size();
reloc_size += CompiledStaticCall::reloc_to_interp_stub();
}
        } else if (mach->is_MachSafePoint()) {
          // If call/safepoint are adjacent, account for possible
          // nop to disambiguate the two safepoints.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // check for all offsets inside this block.
          if (last_call_adr >= blk_starts[i]) {
blk_size += nop_size;
}
        }
        if (mach->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
          // Nop is inserted between "avoid back to back" instructions.
          // ScheduleAndBundle() can rearrange nodes in a block,
          // check for all offsets inside this block.
          if (last_avoid_back_to_back_adr >= blk_starts[i]) {
blk_size += nop_size;
}
        }
        if (mach->may_be_short_branch()) {
          if (!nj->is_MachBranch()) {
#ifndef PRODUCT
            nj->dump(3);
#endif
Unimplemented();
}
assert(jmp_nidx[i] == -1, "block should have only one branch");
jmp_offset[i] = blk_size;
jmp_size[i] = nj->size(C->regalloc());
jmp_nidx[i] = j;
has_short_branch_candidate = true;
}
}
      blk_size += nj->size(C->regalloc());

      // Remember end of call offset
      if (nj->is_MachCall() && !nj->is_MachCallLeaf()) {
last_call_adr = blk_starts[i]+blk_size;
      }
      // Remember end of avoid_back_to_back offset
      if (nj->is_Mach() && nj->as_Mach()->avoid_back_to_back(MachNode::AVOID_AFTER)) {
last_avoid_back_to_back_adr = blk_starts[i]+blk_size;
}
}
    // When the next block starts a loop, we may insert pad NOP
    // instructions.  Since we cannot know our future alignment,
    // assume the worst.
    if (i < nblocks - 1) {
      Block* nb = C->cfg()->get_block(i + 1);
      int max_loop_pad = nb->code_alignment()-relocInfo::addr_unit();
      if (max_loop_pad > 0) {
        assert(is_power_of_2(max_loop_pad+relocInfo::addr_unit()), "");
        // Adjust last_call_adr and/or last_avoid_back_to_back_adr.
        // If either is the last instruction in this block, bump by
        // max_loop_pad in lock-step with blk_size, so sizing
        // calculations in subsequent blocks still can conservatively
        // detect that it may be the last instruction in this block.
        if (last_call_adr == blk_starts[i]+blk_size) {
last_call_adr += max_loop_pad;
        }
        if (last_avoid_back_to_back_adr == blk_starts[i]+blk_size) {
last_avoid_back_to_back_adr += max_loop_pad;
}
blk_size += max_loop_pad;
block_worst_case_pad[i + 1] = max_loop_pad;
}
}
// Save block size; update total method size
blk_starts[i+1] = blk_starts[i]+blk_size;
}
  // Step two, replace eligible long jumps.
  bool progress = true;
  uint last_may_be_short_branch_adr = max_juint;
  while (has_short_branch_candidate && progress) {
progress = false;
    has_short_branch_candidate = false;
    int adjust_block_start = 0;
    for (uint i = 0; i < nblocks; i++) {
Block* block = C->cfg()->get_block(i); int idx = jmp_nidx[i];
      MachNode* mach = (idx == -1) ? NULL: block->get_node(idx)->as_Mach();
      if (mach != NULL && mach->may_be_short_branch()) {
#ifdef ASSERT
        assert(jmp_size[i] > 0 && mach->is_MachBranch(), "sanity");
        int j;
        // Find the branch; ignore trailing NOPs.
        for (j = block->number_of_nodes()-1; j>=0; j--) {
          Node* n = block->get_node(j);
          if (!n->is_Mach() || n->as_Mach()->ideal_Opcode() != Op_Con) break;
        }
        assert(j >= 0 && j == idx && block->get_node(j) == (Node*)mach, "sanity");
#endif
        int br_size = jmp_size[i];
        int br_offs = blk_starts[i] + jmp_offset[i];
// This requires the TRUE branch target be in succs[0]
        uint bnum = block->non_connector_successor(0)->_pre_order;
        int offset = blk_starts[bnum] - br_offs;
        if (bnum > i) { // adjust following block's offset
offset -= adjust_block_start;
}
        // This block can be a loop header, account for the padding
        // in the previous block.
        int block_padding = block_worst_case_pad[i];
        assert(i == 0 || block_padding == 0 || br_offs >= block_padding, "Should have at least a padding on top");
        // In the following code a nop could be inserted before
        // the branch which will increase the backward distance.
        bool needs_padding = ((uint)(br_offs - block_padding) == last_may_be_short_branch_adr);
assert(!needs_padding || jmp_offset[i] == 0, "padding only branches at the beginning of block");
if (needs_padding && offset <= 0)
offset -= nop_size;
        if (C->matcher()->is_short_branch_offset(mach->rule(), br_size, offset)) {
          // We've got a winner.  Replace this branch.
MachNode* replacement = mach->as_MachBranch()->short_branch_version();
          // Update the jmp_size.
          int new_size = replacement->size(C->regalloc());
          int diff     = br_size - new_size;
          assert(diff >= (int)nop_size, "short_branch size should be smaller");
          // Conservatively take into account padding between
          // avoid_back_to_back branches. Previous branch could be
          // converted into avoid_back_to_back branch during next
          // rounds.
          if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
jmp_offset[i] += nop_size;
diff -= nop_size;
}
adjust_block_start += diff;
block->map_node(replacement, idx);
mach->subsume_by(replacement, C);
mach = replacement;
progress = true;
jmp_size[i] = new_size;
DEBUG_ONLY( jmp_target[i] = bnum; );
DEBUG_ONLY( jmp_rule[i] = mach->rule(); );
} else { // The jump distance is not short, try again during next iteration.
has_short_branch_candidate = true;
}
      } // (mach->may_be_short_branch())
      if (mach != NULL && (mach->may_be_short_branch() ||
mach->avoid_back_to_back(MachNode::AVOID_AFTER))) {
last_may_be_short_branch_adr = blk_starts[i] + jmp_offset[i] + jmp_size[i];
}
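      // Any branches shrunk so far in this pass shorten the code before the
      // next block, so pull its start offset back by the accumulated amount.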
blk_starts[i+1] -= adjust_block_start;
}
}
#ifdef ASSERT
  for (uint i = 0; i < nblocks; i++) { // For all blocks
    if (jmp_target[i] != 0) {
      int br_size = jmp_size[i];
      int offset = blk_starts[jmp_target[i]]-(blk_starts[i] + jmp_offset[i]);
      if (!C->matcher()->is_short_branch_offset(jmp_rule[i], br_size, offset)) {
tty->print_cr("target (%d) - jmp_offset(%d) = offset (%d), jump_size(%d), jmp_block B%d, target_block B%d", blk_starts[jmp_target[i]], blk_starts[i] + jmp_offset[i], offset, br_size, i, jmp_target[i]);
}
assert(C->matcher()->is_short_branch_offset(jmp_rule[i], br_size, offset), "Displacement too large for short jmp");
}
  }
#endif
  // Step 3, compute the offsets of all blocks, will be done in fill_buffer()
  // after ScheduleAndBundle().
// Relocation records
reloc_size += 1; // Relo entry for exception handler
  // Adjust reloc_size to number of record of relocation info
  // Min is 2 bytes, max is probably 6 or 8, with a tax up to 25% for
  // a relocation index.
  // The CodeBuffer will expand the locs array if this estimate is too low.
reloc_size *= 10 / sizeof(relocInfo);
//------------------------------FillLocArray-----------------------------------
// Create a bit of debug info and append it to the array.  The mapping is from
// Java local or expression stack to constant, register or stack-slot.  For
// doubles, insert 2 mappings and return 1 (to tell the caller that the next
// entry has been taken care of and caller should skip it).
static LocationValue *new_loc_value( PhaseRegAlloc *ra, OptoReg::Name regnum, Location::Type l_type ) {
  // This should never have accepted Bad before
  assert(OptoReg::is_valid(regnum), "location must be valid");
  return (OptoReg::is_reg(regnum))
? new LocationValue(Location::new_reg_loc(l_type, OptoReg::as_VMReg(regnum)) )
: new LocationValue(Location::new_stk_loc(l_type, ra->reg2offset(regnum)));
}
ObjectValue*
PhaseOutput::sv_for_node_id(GrowableArray<ScopeValue*> *objs, int id) {
  for (int i = 0; i < objs->length(); i++) {
assert(objs->at(i)->is_object(), "corrupt object cache");
    ObjectValue* sv = (ObjectValue*) objs->at(i);
    if (sv->id() == id) {
      return sv;
}
  }
  // Otherwise..
  return NULL;
}
void PhaseOutput::FillLocArray( int idx, MachSafePointNode* sfpt, Node *local,
GrowableArray<ScopeValue*> *array,
GrowableArray<ScopeValue*> *objs ) {
assert( local, "use _top instead of null" ); if (array->length() != idx) {
assert(array->length() == idx + 1, "Unexpected array count"); // Old functionality: // return // New functionality: // Assert if the local is not top. In product mode let the new node // override the old entry.
assert(local == C->top(), "LocArray collision"); if (local == C->top()) { return;
}
array->pop();
  }
  const Type *t = local->bottom_type();

  // Is it a safepoint scalar object node?
  if (local->is_SafePointScalarObject()) {
SafePointScalarObjectNode* spobj = local->as_SafePointScalarObject();
    ObjectValue* sv = sv_for_node_id(objs, spobj->_idx);
    if (sv == NULL) {
ciKlass* cik = t->is_oopptr()->exact_klass();
assert(cik->is_instance_klass() ||
cik->is_array_klass(), "Not supported allocation.");
sv = new ObjectValue(spobj->_idx, new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
set_sv_for_object_node(objs, sv);
      uint first_ind = spobj->first_index(sfpt->jvms());
      for (uint i = 0; i < spobj->n_fields(); i++) {
Node* fld_node = sfpt->in(first_ind+i);
(void)FillLocArray(sv->field_values()->length(), sfpt, fld_node, sv->field_values(), objs);
}
}
    array->append(sv);
    return;
}
// Grab the register number for the local
  OptoReg::Name regnum = C->regalloc()->get_reg_first(local);
  if( OptoReg::is_valid(regnum) ) {// Got a register/stack?
    // Record the double as two float registers.
    // The register mask for such a value always specifies two adjacent
    // float registers, with the lower register number even.
    // Normally, the allocation of high and low words to these registers
    // is irrelevant, because nearly all operations on register pairs
    // (e.g., StoreD) treat them as a single unit.
    // Here, we assume in addition that the words in these two registers
    // stored "naturally" (by operations like StoreD and double stores
    // within the interpreter) such that the lower-numbered register
    // is written to the lower memory address.  This may seem like
    // a machine dependency, but it is not--it is a requirement on
    // the author of the <arch>.ad file to ensure that, for every
    // even/odd double-register pair to which a double may be allocated,
    // the word in the even single-register is stored to the first
    // memory word.  (Note that register numbers are completely
    // arbitrary, and are not tied to any machine-level encodings.)
#ifdef _LP64
    if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon ) {
array->append(new ConstantIntValue((jint)0));
array->append(new_loc_value( C->regalloc(), regnum, Location::dbl ));
    } else if ( t->base() == Type::Long ) {
array->append(new ConstantIntValue((jint)0));
array->append(new_loc_value( C->regalloc(), regnum, Location::lng ));
    } else if ( t->base() == Type::RawPtr ) {
      // jsr/ret return address which must be restored into the full
      // width 64-bit stack slot.
array->append(new_loc_value( C->regalloc(), regnum, Location::lng ));
    }
#else //_LP64
    if( t->base() == Type::DoubleBot || t->base() == Type::DoubleCon || t->base() == Type::Long ) {
      // Repack the double/long as two jints.
      // The convention the interpreter uses is that the second local
      // holds the first raw word of the native double representation.
      // This is actually reasonable, since locals and stack arrays
      // grow downwards in all implementations.
      // (If, on some machine, the interpreter's Java locals or stack
      // were to grow upwards, the embedded doubles would be word-swapped.)
array->append(new_loc_value( C->regalloc(), OptoReg::add(regnum,1), Location::normal ));
array->append(new_loc_value( C->regalloc(), regnum , Location::normal ));
    }
#endif //_LP64
    else if( (t->base() == Type::FloatBot || t->base() == Type::FloatCon) &&
OptoReg::is_reg(regnum) ) {
array->append(new_loc_value( C->regalloc(), regnum, Matcher::float_in_double()
? Location::float_in_dbl : Location::normal ));
    } else if( t->base() == Type::Int && OptoReg::is_reg(regnum) ) {
array->append(new_loc_value( C->regalloc(), regnum, Matcher::int_in_long
? Location::int_in_long : Location::normal ));
    } else if( t->base() == Type::NarrowOop ) {
array->append(new_loc_value( C->regalloc(), regnum, Location::narrowoop ));
    } else if (t->base() == Type::VectorA || t->base() == Type::VectorS ||
t->base() == Type::VectorD || t->base() == Type::VectorX ||
t->base() == Type::VectorY || t->base() == Type::VectorZ) {
array->append(new_loc_value( C->regalloc(), regnum, Location::vector ));
    } else if (C->regalloc()->is_oop(local)) {
assert(t->base() == Type::OopPtr || t->base() == Type::InstPtr ||
t->base() == Type::AryPtr, "Unexpected type: %s", t->msg());
array->append(new_loc_value( C->regalloc(), regnum, Location::oop ));
} else {
assert(t->base() == Type::Int || t->base() == Type::Half ||
t->base() == Type::FloatCon || t->base() == Type::FloatBot, "Unexpected type: %s", t->msg());
array->append(new_loc_value( C->regalloc(), regnum, Location::normal ));
    }
    return;
}
  // No register.  It must be constant data.
  switch (t->base()) {
  case Type::Half:              // Second half of a double
    ShouldNotReachHere();       // Caller should skip 2nd halves
    break;
  case Type::AnyPtr:
    array->append(new ConstantOopWriteValue(NULL));
    break;
  case Type::AryPtr:
  case Type::InstPtr:          // fall through
    array->append(new ConstantOopWriteValue(t->isa_oopptr()->const_oop()->constant_encoding()));
    break;
  case Type::NarrowOop:
    if (t == TypeNarrowOop::NULL_PTR) {
array->append(new ConstantOopWriteValue(NULL));
} else {
array->append(new ConstantOopWriteValue(t->make_ptr()->isa_oopptr()->const_oop()->constant_encoding()));
    }
    break;
  case Type::Int:
    array->append(new ConstantIntValue(t->is_int()->get_con()));
    break;
  case Type::RawPtr:
    // A return address (T_ADDRESS).
    assert((intptr_t)t->is_ptr()->get_con() < (intptr_t)0x10000, "must be a valid BCI");
#ifdef _LP64
    // Must be restored to the full-width 64-bit stack slot.
    array->append(new ConstantLongValue(t->is_ptr()->get_con()));
#else
    array->append(new ConstantIntValue(t->is_ptr()->get_con()));
#endif
    break;
  case Type::FloatCon: {
    float f = t->is_float_constant()->getf();
    array->append(new ConstantIntValue(jint_cast(f)));
    break;
  }
  case Type::DoubleCon: {
    jdouble d = t->is_double_constant()->getd();
#ifdef _LP64
array->append(new ConstantIntValue((jint)0));
    array->append(new ConstantDoubleValue(d));
#else
    // Repack the double as two jints.
    // The convention the interpreter uses is that the second local
    // holds the first raw word of the native double representation.
    // This is actually reasonable, since locals and stack arrays
    // grow downwards in all implementations.
    // (If, on some machine, the interpreter's Java locals or stack
    // were to grow upwards, the embedded doubles would be word-swapped.)
jlong_accessor acc;
acc.long_value = jlong_cast(d);
array->append(new ConstantIntValue(acc.words[1]));
    array->append(new ConstantIntValue(acc.words[0]));
#endif
    break;
  }
  case Type::Long: {
    jlong d = t->is_long()->get_con();
#ifdef _LP64
array->append(new ConstantIntValue((jint)0));
    array->append(new ConstantLongValue(d));
#else
    // Repack the long as two jints.
    // The convention the interpreter uses is that the second local
    // holds the first raw word of the native double representation.
    // This is actually reasonable, since locals and stack arrays
    // grow downwards in all implementations.
    // (If, on some machine, the interpreter's Java locals or stack
    // were to grow upwards, the embedded doubles would be word-swapped.)
jlong_accessor acc;
acc.long_value = d;
array->append(new ConstantIntValue(acc.words[1]));
    array->append(new ConstantIntValue(acc.words[0]));
#endif
    break;
  }
  case Type::Top:               // Add an illegal value here
    array->append(new LocationValue(Location()));
    break;
  default:
    ShouldNotReachHere();
    break;
}
}
// Determine if this node starts a bundle
bool PhaseOutput::starts_bundle(const Node *n) const {
  return (_node_bundling_limit > n->_idx &&
_node_bundling_base[n->_idx].starts_bundle());
}
//--------------------------Process_OopMap_Node--------------------------------
void PhaseOutput::Process_OopMap_Node(MachNode *mach, int current_offset) {
  // Handle special safepoint nodes for synchronization
MachSafePointNode *sfn = mach->as_MachSafePoint();
MachCallNode *mcall;
  // Add the safepoint in the DebugInfoRecorder
  if( !mach->is_MachCall() ) {
mcall = NULL;
C->debug_info()->add_safepoint(safepoint_pc_offset, sfn->_oop_map);
} else {
mcall = mach->as_MachCall();
    // Is the call a MethodHandle call?
    if (mcall->is_MachCallJava()) {
      if (mcall->as_MachCallJava()->_method_handle_invoke) {
assert(C->has_method_handle_invokes(), "must have been set during call generation");
is_method_handle_invoke = true;
}
arg_escape = mcall->as_MachCallJava()->_arg_escape;
}
    // Check if a call returns an object.
    if (mcall->returns_pointer()) {
return_oop = true;
}
safepoint_pc_offset += mcall->ret_addr_offset();
C->debug_info()->add_safepoint(safepoint_pc_offset, mcall->_oop_map);
}
  // Loop over the JVMState list to add scope information
  // Do not skip safepoints with a NULL method, they need monitor info
  JVMState* youngest_jvms = sfn->jvms();
  int max_depth = youngest_jvms->depth();
  // Allocate the object pool for scalar-replaced objects -- the map from
  // small-integer keys (which can be recorded in the local and ostack
  // arrays) to descriptions of the object state.
GrowableArray<ScopeValue*> *objs = new GrowableArray<ScopeValue*>();
  // Visit scopes from oldest to youngest.
  for (int depth = 1; depth <= max_depth; depth++) {
    JVMState* jvms = youngest_jvms->of_depth(depth);
    int idx;
    ciMethod* method = jvms->has_method() ? jvms->method() : NULL;
    // Safepoints that do not have method() set only provide oop-map and monitor info
    // to support GC; these do not support deoptimization.
    int num_locs = (method == NULL) ? 0 : jvms->loc_size();
    int num_exps = (method == NULL) ? 0 : jvms->stk_size();
    int num_mon  = jvms->nof_monitors();
assert(method == NULL || jvms->bci() < 0 || num_locs == method->max_locals(), "JVMS local count must match that of the method");
// Add Local and Expression Stack Information
// Insert locals into the locarray
    GrowableArray<ScopeValue*> *locarray = new GrowableArray<ScopeValue*>(num_locs);
    for( idx = 0; idx < num_locs; idx++ ) {
FillLocArray( idx, sfn, sfn->local(jvms, idx), locarray, objs );
}
// Add in mappings of the monitors
assert( !method ||
!method->is_synchronized() ||
method->is_native() ||
num_mon > 0 ||
!GenerateSynchronizationCode, "monitors must always exist for synchronized methods");
// Build the growable array of ScopeValues for exp stack
GrowableArray<MonitorValue*> *monarray = new GrowableArray<MonitorValue*>(num_mon);
    // Loop over monitors and insert into array
    for (idx = 0; idx < num_mon; idx++) {
      // Grab the node that defines this monitor
Node* box_node = sfn->monitor_box(jvms, idx);
Node* obj_node = sfn->monitor_obj(jvms, idx);
// Create ScopeValue for object
ScopeValue *scval = NULL;
if (obj_node->is_SafePointScalarObject()) {
SafePointScalarObjectNode* spobj = obj_node->as_SafePointScalarObject();
        scval = PhaseOutput::sv_for_node_id(objs, spobj->_idx);
        if (scval == NULL) {
          const Type *t = spobj->bottom_type();
ciKlass* cik = t->is_oopptr()->exact_klass();
assert(cik->is_instance_klass() ||
cik->is_array_klass(), "Not supported allocation.");
ObjectValue* sv = new ObjectValue(spobj->_idx, new ConstantOopWriteValue(cik->java_mirror()->constant_encoding()));
PhaseOutput::set_sv_for_object_node(objs, sv);
// We dump the object pool first, since deoptimization reads it in first.
C->debug_info()->dump_object_pool(objs);
// Build first class objects to pass to scope
DebugToken *locvals = C->debug_info()->create_scope_values(locarray);
DebugToken *expvals = C->debug_info()->create_scope_values(exparray);
DebugToken *monvals = C->debug_info()->create_monitor_values(monarray);
// Make method available for all Safepoints
    ciMethod* scope_method = method ? method : C->method();
    // Describe the scope here
assert(jvms->bci() >= InvocationEntryBci && jvms->bci() <= 0x10000, "must be a valid or entry BCI");
    assert(!jvms->should_reexecute() || depth == max_depth, "reexecute allowed only for the youngest");
    // Now we can describe the scope.
    methodHandle null_mh;
    bool rethrow_exception = false;
C->debug_info()->describe_scope(
safepoint_pc_offset,
null_mh,
scope_method,
jvms->bci(),
jvms->should_reexecute(),
rethrow_exception,
is_method_handle_invoke,
return_oop,
has_ea_local_in_scope,
arg_escape,
locvals,
expvals,
monvals
);
} // End jvms loop
// Mark the end of the scope set.
C->debug_info()->end_safepoint(safepoint_pc_offset);
}
// A simplified version of Process_OopMap_Node, to handle non-safepoints.
class NonSafepointEmitter {
Compile* C;
  JVMState* _pending_jvms;
  int       _pending_offset;

  void observe_instruction(Node* n, int pc_offset) {
    if (!C->debug_info()->recording_non_safepoints())  return;

    Node_Notes* nn = C->node_notes_at(n->_idx);
    if (nn == NULL || nn->jvms() == NULL)  return;
    if (_pending_jvms != NULL &&
        _pending_jvms->same_calls_as(nn->jvms())) {
      // Repeated JVMS?  Stretch it up here.
_pending_offset = pc_offset;
    } else {
      if (_pending_jvms != NULL &&
_pending_offset < pc_offset) {
emit_non_safepoint();
}
      _pending_jvms = NULL;
      if (pc_offset > C->debug_info()->last_pc_offset()) {
        // This is the only way _pending_jvms can become non-NULL:
_pending_jvms = nn->jvms();
_pending_offset = pc_offset;
}
}
}
  // Stay out of the way of real safepoints:
  void observe_safepoint(JVMState* jvms, int pc_offset) {
    if (_pending_jvms != NULL &&
!_pending_jvms->same_calls_as(jvms) &&
_pending_offset < pc_offset) {
emit_non_safepoint();
}
_pending_jvms = NULL;
}
// Set the initially allocated size
const_req = initial_const_capacity;
  // The extra spacing after the code is necessary on some platforms.
  // Sometimes we need to patch in a jump after the last instruction,
  // if the nmethod has been deoptimized.  (See 4932387, 4894843.)

  // Compute the byte offset where we can store the deopt pc.
  if (C->fixed_slots() != 0) {
_orig_pc_slot_offset_in_bytes = C->regalloc()->reg2offset(OptoReg::stack2reg(_orig_pc_slot));
}
if (C->has_mach_constant_base_node()) {
    uint add_size = 0;
    // Fill the constant table.
    // Note: This must happen before shorten_branches.
    for (uint i = 0; i < C->cfg()->number_of_blocks(); i++) {
Block* b = C->cfg()->get_block(i);
for (uint j = 0; j < b->number_of_nodes(); j++) {
Node* n = b->get_node(j);
        // If the node is a MachConstantNode evaluate the constant
        // value section.
        if (n->is_MachConstant()) {
MachConstantNode* machcon = n->as_MachConstant();
machcon->eval_constant(C);
        } else if (n->is_Mach()) {
          // On Power there are more nodes that issue constants.
add_size += (n->as_Mach()->ins_num_consts() * 8);
}
}
}
// Calculate the offsets of the constants and the size of the // constant table (including the padding to the next section).
constant_table().calculate_offsets_and_size();
const_req = constant_table().size() + add_size;
}
  // Initialize the space for the BufferBlob used to find and verify
  // instruction size in MachNode::emit_size()
init_scratch_buffer_blob(const_req);
}
CodeBuffer* PhaseOutput::init_buffer() {
  int stub_req  = _buf_sizes._stub;
  int code_req  = _buf_sizes._code;
  int const_req = _buf_sizes._const;

  // nmethod and CodeBuffer count stubs & constants as part of method's code.
  // class HandlerImpl is platform-specific and defined in the *.ad files.
  int exception_handler_req = HandlerImpl::size_exception_handler() + MAX_stubs_size; // add marginal slop for handler
  int deopt_handler_req     = HandlerImpl::size_deopt_handler()     + MAX_stubs_size; // add marginal slop for handler
stub_req += MAX_stubs_size; // ensure per-stub margin
code_req += MAX_inst_size; // ensure per-instruction margin
if (StressCodeBuffers)
code_req = const_req = stub_req = exception_handler_req = deopt_handler_req = 0x10; // force expansion
  // Have we run out of code space?
  if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full"); return NULL;
} // Configure the code buffer.
cb->initialize_consts_size(const_req);
cb->initialize_stubs_size(stub_req);
cb->initialize_oop_recorder(C->env()->oop_recorder());
// fill in the nop array for bundling computations
MachNode *_nop_list[Bundle::_nop_count];
Bundle::initialize_nops(_nop_list);
return cb;
}
//------------------------------fill_buffer------------------------------------
void PhaseOutput::fill_buffer(CodeBuffer* cb, uint* blk_starts) {
  // blk_starts[] contains offsets calculated during short branches processing,
  // offsets should not be increased during following steps.

  // Compute the size of first NumberOfLoopInstrToAlign instructions at head
  // of a loop. It is used to determine the padding for loop alignment.
Compile::TracePhase tp("fill buffer", &timers[_t_fillBuffer]);
compute_loop_first_inst_sizes();
// Create oopmap set.
_oop_map_set = new OopMapSet();
// !!!!! This preserves old handling of oopmaps for now
C->debug_info()->set_oopmaps(_oop_map_set);
// Count and start of calls
uint* call_returns = NEW_RESOURCE_ARRAY(uint, nblocks+1);
  uint return_offset = 0;
  int nop_size = (new MachNopNode())->size(C->regalloc());
  int previous_offset = 0;
  int current_offset  = 0;
  int last_call_offset = -1;
  int last_avoid_back_to_back_offset = -1;
#ifdef ASSERT
uint* jmp_target = NEW_RESOURCE_ARRAY(uint,nblocks);
uint* jmp_offset = NEW_RESOURCE_ARRAY(uint,nblocks);
uint* jmp_size = NEW_RESOURCE_ARRAY(uint,nblocks);
  uint* jmp_rule = NEW_RESOURCE_ARRAY(uint,nblocks);
#endif

  // Create an array of unused labels, one for each basic block, if printing is enabled
#if defined(SUPPORT_OPTO_ASSEMBLY)
  int* node_offsets = NULL;
uint node_offset_limit = C->unique();
if (C->print_assembly()) {
node_offsets = NEW_RESOURCE_ARRAY(int, node_offset_limit);
  }
  if (node_offsets != NULL) {
    // We need to initialize. Unused array elements may contain garbage and mess up PrintOptoAssembly.
memset(node_offsets, 0, node_offset_limit*sizeof(int));
  }
#endif

  // Emit the constant table.
  if (C->has_mach_constant_base_node()) {
    if (!constant_table().emit(*cb)) {
      C->record_failure("consts section overflow");
      return;
}
}
// Create an array of labels, one for each basic block
  Label* blk_labels = NEW_RESOURCE_ARRAY(Label, nblocks+1);
  for (uint i = 0; i <= nblocks; i++) {
blk_labels[i].init();
}
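  // Each label is bound at the top of its block as the block is emitted, and
  // branch nodes below resolve their targets against these labels.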
// Now fill in the code buffer
  Node* delay_slot = NULL;
  for (uint i = 0; i < nblocks; i++) {
Block* block = C->cfg()->get_block(i);
_block = block;
Node* head = block->head();
    // If this block needs to start aligned (i.e, can be reached other
    // than by falling-thru from the previous block), then force the
    // start of a new bundle.
    if (Pipeline::requires_bundling() && starts_bundle(head)) {
cb->flush_bundle(true);
}
// Define the label at the beginning of the basic block
MacroAssembler(cb).bind(blk_labels[block->_pre_order]);
uint last_inst = block->number_of_nodes();
    // Emit block normally, except for last instruction.
    // Emit means "dump code bits into code buffer".
    for (uint j = 0; j<last_inst; j++) {
_index = j;
// Get the node
Node* n = block->get_node(j);
      // See if delay slots are supported
      if (valid_bundle_info(n) && node_bundling(n)->used_in_unconditional_delay()) {
assert(delay_slot == NULL, "no use of delay slot node");
assert(n->size(C->regalloc()) == Pipeline::instr_unit_size(), "delay slot instruction wrong size");
        delay_slot = n;
        continue;
}
      // If this starts a new instruction group, then flush the current one
      // (but allow split bundles)
      if (Pipeline::requires_bundling() && starts_bundle(n))
cb->flush_bundle(false);
      // Special handling for SafePoint/Call Nodes
      bool is_mcall = false;
      if (n->is_Mach()) {
MachNode *mach = n->as_Mach();
        is_mcall = n->is_MachCall();
        bool is_sfn = n->is_MachSafePoint();

        // If this requires all previous instructions be flushed, then do so
        if (is_sfn || is_mcall || mach->alignment_required() != 1) {
cb->flush_bundle(true);
current_offset = cb->insts_size();
}
// A padding may be needed again since a previous instruction // could be moved to delay slot.
        // align the instruction if necessary
        int padding = mach->compute_padding(current_offset);
        // Make sure safepoint node for polling is distinct from a call's
        // return by adding a nop if needed.
        if (is_sfn && !is_mcall && padding == 0 && current_offset == last_call_offset) {
padding = nop_size;
        }
        if (padding == 0 && mach->avoid_back_to_back(MachNode::AVOID_BEFORE) &&
            current_offset == last_avoid_back_to_back_offset) {
          // Avoid back to back some instructions.
padding = nop_size;
}
if (padding > 0) {
          assert((padding % nop_size) == 0, "padding is not a multiple of NOP size");
          int nops_cnt = padding / nop_size;
MachNode *nop = new MachNopNode(nops_cnt);
block->insert_node(nop, j++);
last_inst++;
          C->cfg()->map_node_to_block(nop, block);
          // Ensure enough space.
          cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
          if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
            C->record_failure("CodeCache is full");
            return;
}
nop->emit(*cb, C->regalloc());
cb->flush_bundle(true);
current_offset = cb->insts_size();
}
        bool observe_safepoint = is_sfn;
        // Remember the start of the last call in a basic block
        if (is_mcall) {
MachCallNode *mcall = mach->as_MachCall();
// This destination address is NOT PC-relative
mcall->method_set((intptr_t)mcall->entry_point());
// Save the return address
call_returns[block->_pre_order] = current_offset + mcall->ret_addr_offset();
        // sfn will be valid whenever mcall is valid now because of inheritance
        if (observe_safepoint) {
          // Handle special safepoint nodes for synchronization
          if (!is_mcall) {
            MachSafePointNode *sfn = mach->as_MachSafePoint();
            // !!!!! Stubs only need an oopmap right now, so bail out
            if (sfn->jvms()->method() == NULL) {
              // Write the oopmap directly to the code blob??!!
              continue;
}
} // End synchronization
non_safepoints.observe_safepoint(mach->as_MachSafePoint()->jvms(),
current_offset);
Process_OopMap_Node(mach, current_offset);
} // End if safepoint
        // If this is a null check, then add the start of the previous instruction to the list
        else if( mach->is_MachNullCheck() ) {
inct_starts[inct_cnt++] = previous_offset;
}
        // If this is a branch, then fill in the label with the target BB's label
        else if (mach->is_MachBranch()) {
          // This requires the TRUE branch target be in succs[0]
uint block_num = block->non_connector_successor(0)->_pre_order;
          // Try to replace long branch if delay slot is not used,
          // it is mostly for back branches since forward branch's
          // distance is not updated yet.
          bool delay_slot_is_used = valid_bundle_info(n) &&
                                    C->output()->node_bundling(n)->use_unconditional_delay();
          if (!delay_slot_is_used && mach->may_be_short_branch()) {
            assert(delay_slot == NULL, "not expecting delay slot node");
            int br_size = n->size(C->regalloc());
            int offset = blk_starts[block_num] - current_offset;
            if (block_num >= i) {
              // Current and following block's offset are not
              // finalized yet, adjust distance by the difference
              // between calculated and final offsets of current block.
offset -= (blk_starts[i] - blk_offset);
            }
            // In the following code a nop could be inserted before
            // the branch which will increase the backward distance.
            bool needs_padding = (current_offset == last_avoid_back_to_back_offset);
            if (needs_padding && offset <= 0)
offset -= nop_size;
            if (C->matcher()->is_short_branch_offset(mach->rule(), br_size, offset)) {
              // We've got a winner.  Replace this branch.
MachNode* replacement = mach->as_MachBranch()->short_branch_version();
              // Update the jmp_size.
              int new_size = replacement->size(C->regalloc());
              assert((br_size - new_size) >= (int)nop_size, "short_branch size should be smaller");
              // Insert padding between avoid_back_to_back branches.
              if (needs_padding && replacement->avoid_back_to_back(MachNode::AVOID_BEFORE)) {
MachNode *nop = new MachNopNode();
block->insert_node(nop, j++);
C->cfg()->map_node_to_block(nop, block);
last_inst++;
nop->emit(*cb, C->regalloc());
cb->flush_bundle(true);
current_offset = cb->insts_size();
              }
#ifdef ASSERT
jmp_target[i] = block_num;
jmp_offset[i] = current_offset - blk_offset;
jmp_size[i] = new_size;
              jmp_rule[i]   = mach->rule();
#endif
block->map_node(replacement, j);
mach->subsume_by(replacement, C);
n = replacement;
mach = replacement;
}
}
mach->as_MachBranch()->label_set( &blk_labels[block_num], block_num );
        } else if (mach->ideal_Opcode() == Op_Jump) {
          for (uint h = 0; h < block->_num_succs; h++) {
            Block* succs_block = block->_succs[h];
            for (uint j = 1; j < succs_block->num_preds(); j++) {
              Node* jpn = succs_block->pred(j);
              if (jpn->is_JumpProj() && jpn->in(0) == mach) {
uint block_num = succs_block->non_connector()->_pre_order;
Label *blkLabel = &blk_labels[block_num];
mach->add_case_label(jpn->as_JumpProj()->proj_no(), blkLabel);
}
}
}
        }
#ifdef ASSERT
        // Check that oop-store precedes the card-mark
        else if (mach->ideal_Opcode() == Op_StoreCM) {
          uint storeCM_idx = j;
          int count = 0;
          for (uint prec = mach->req(); prec < mach->len(); prec++) {
            Node *oop_store = mach->in(prec);  // Precedence edge
            if (oop_store == NULL) continue;
count++;
            uint i4;
            for (i4 = 0; i4 < last_inst; ++i4) {
              if (block->get_node(i4) == oop_store) {
                break;
}
            }
            // Note: This test can provide a false failure if other precedence
            // edges have been added to the storeCMNode.
assert(i4 == last_inst || i4 < storeCM_idx, "CM card-mark executes before oop-store");
}
assert(count > 0, "storeCM expects at least one precedence edge");
        }
#endif
        else if (!n->is_Proj()) {
          // Remember the beginning of the previous instruction, in case
          // it's followed by a flag-kill and a null-check.  Happens on
          // Intel all the time, with add-to-memory kind of opcodes.
previous_offset = current_offset;
}
        }
        // Not an else-if!
        // If this is a trap based cmp then add its offset to the list.
        if (mach->is_TrapBasedCheckNode()) {
inct_starts[inct_cnt++] = current_offset;
}
}
// Verify that there is sufficient space remaining
      cb->insts()->maybe_expand_to_ensure_remaining(MAX_inst_size);
      if ((cb->blob() == NULL) || (!CompileBroker::should_compile_new_jobs())) {
C->record_failure("CodeCache is full"); return;
}
      // Save the offset for the listing
#if defined(SUPPORT_OPTO_ASSEMBLY)
      if ((node_offsets != NULL) && (n->_idx < node_offset_limit)) {
node_offsets[n->_idx] = cb->insts_size();
      }
#endif
assert(!C->failing(), "Should not reach here if failing.");