/* * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Initialization // // Note: to break cycle with universe initialization, stubs are generated in two phases. // The first one generates stubs needed during universe init (e.g., _handle_must_compile_first_entry). // The second phase includes all other stubs (which may depend on universe being initialized.)
externvoid StubGenerator_generate(CodeBuffer* code, int phase); // only interface to generators
// Returns true if 'pc' falls inside any registered unsafe-copy-memory
// code region [start_pc, end_pc). The table is scanned linearly over
// _table_length entries.
// Fix: "returntrue"/"returnfalse" were fused into invalid tokens.
bool UnsafeCopyMemory::contains_pc(address pc) {
  for (int i = 0; i < UnsafeCopyMemory::_table_length; i++) {
    UnsafeCopyMemory* entry = &UnsafeCopyMemory::_table[i];
    if (pc >= entry->start_pc() && pc < entry->end_pc()) {
      return true;
    }
  }
  return false;
}
// Maps a pc inside a registered unsafe-copy-memory region to that
// region's error-exit continuation pc; returns NULL when 'pc' lies in
// no registered region [start_pc, end_pc).
address UnsafeCopyMemory::page_error_continue_pc(address pc) {
  for (int idx = 0; idx < UnsafeCopyMemory::_table_length; idx++) {
    UnsafeCopyMemory* region = &UnsafeCopyMemory::_table[idx];
    bool in_region = (pc >= region->start_pc()) && (pc < region->end_pc());
    if (in_region) {
      return region->error_exit_pc();
    }
  }
  return NULL;
}
void StubRoutines::initialize1() { if (_code1 == NULL) {
ResourceMark rm;
TraceTime timer("StubRoutines generation 1", TRACETIME_LOG(Info, startuptime)); // Add extra space for large CodeEntryAlignment int max_aligned_stubs = 10; int size = code_size1 + CodeEntryAlignment * max_aligned_stubs;
_code1 = BufferBlob::create("StubRoutines (1)", size); if (_code1 == NULL) {
vm_exit_out_of_memory(code_size1, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (1)");
}
CodeBuffer buffer(_code1);
StubGenerator_generate(&buffer, 0); // When new stubs added we need to make sure there is some space left // to catch situation when we should increase size again.
assert(code_size1 == 0 || buffer.insts_remaining() > 200, "increase code_size1");
}
}
#ifdef ASSERT
// Fix: "typedefvoid", "staticvoid" and "unsignedint" were fused into
// invalid tokens.
typedef void (*arraycopy_fn)(address src, address dst, int count);

// simple tests of generated arraycopy functions
// Calls 'func' with a zero element count at several src/dst alignments
// and asserts that neither buffer was modified.
static void test_arraycopy_func(address func, int alignment) {
  int v  = 0xcc;
  int v2 = 0x11;
  jlong lbuffer[8];
  jlong lbuffer2[8];
  address fbuffer  = (address) lbuffer;
  address fbuffer2 = (address) lbuffer2;
  unsigned int i;
  for (i = 0; i < sizeof(lbuffer); i++) {
    fbuffer[i] = v; fbuffer2[i] = v2;
  }
  // C++ does not guarantee jlong[] array alignment to 8 bytes.
  // Use middle of array to check that memory before it is not modified.
  address buffer  = align_up((address)&lbuffer[4], BytesPerLong);
  address buffer2 = align_up((address)&lbuffer2[4], BytesPerLong);
  // do an aligned copy
  ((arraycopy_fn)func)(buffer, buffer2, 0);
  for (i = 0; i < sizeof(lbuffer); i++) {
    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
  }
  // adjust destination alignment
  ((arraycopy_fn)func)(buffer, buffer2 + alignment, 0);
  for (i = 0; i < sizeof(lbuffer); i++) {
    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
  }
  // adjust source alignment
  ((arraycopy_fn)func)(buffer + alignment, buffer2, 0);
  for (i = 0; i < sizeof(lbuffer); i++) {
    assert(fbuffer[i] == v && fbuffer2[i] == v2, "shouldn't have copied anything");
  }
}
#endif // ASSERT
// Generates the continuation stubs ("generation 3") into _code3 exactly
// once; subsequent calls are no-ops.
void StubRoutines::initializeContinuationStubs() {
  if (_code3 != NULL) {
    return; // already generated
  }
  ResourceMark rm;
  TraceTime timer("StubRoutines generation 3", TRACETIME_LOG(Info, startuptime));
  // NOTE(review): the blob is sized with code_size2 while the assert
  // message below says "increase code_size3" — kept byte-identical here;
  // confirm against upstream whether a dedicated code_size3 was intended.
  _code3 = BufferBlob::create("StubRoutines (3)", code_size2);
  if (_code3 == NULL) {
    vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (3)");
  }
  CodeBuffer buffer(_code3);
  StubGenerator_generate(&buffer, 1);
  // When new stubs added we need to make sure there is some space left
  // to catch situation when we should increase size again.
  assert(code_size2 == 0 || buffer.insts_remaining() > 200, "increase code_size3");
}
void StubRoutines::initialize2() { if (_code2 == NULL) {
ResourceMark rm;
TraceTime timer("StubRoutines generation 2", TRACETIME_LOG(Info, startuptime)); // Add extra space for large CodeEntryAlignment int max_aligned_stubs = 100; int size = code_size2 + CodeEntryAlignment * max_aligned_stubs;
_code2 = BufferBlob::create("StubRoutines (2)", size); if (_code2 == NULL) {
vm_exit_out_of_memory(code_size2, OOM_MALLOC_ERROR, "CodeCache: no room for StubRoutines (2)");
}
CodeBuffer buffer(_code2);
StubGenerator_generate(&buffer, 2); // When new stubs added we need to make sure there is some space left // to catch situation when we should increase size again.
assert(code_size2 == 0 || buffer.insts_remaining() > 200, "increase code_size2");
}
// NOTE(review): the matching "#define TEST_ARRAYCOPY(type)" is not visible
// in this chunk — presumably defined immediately above; verify in the full
// file. These run inside initialize2() under #ifdef ASSERT upstream.
// Make sure all the arraycopy stubs properly handle zero count
TEST_ARRAYCOPY(jbyte);
TEST_ARRAYCOPY(jshort);
TEST_ARRAYCOPY(jint);
TEST_ARRAYCOPY(jlong);
#undef TEST_ARRAYCOPY
// Exercises the generated fill stubs for 'type': fills the middle 80
// elements of a 96-element buffer with value 32 at several start offsets
// (both the unaligned _<type>_fill and, when HeapWordSize-aligned, the
// _arrayof_<type>_fill variant) and asserts that only the targeted range
// changed. The union with a double forces 8-byte alignment of the body.
// NOTE(review): macro body left byte-identical — it uses line
// continuations and the source formatting here is mangled; comments are
// added only outside the definition.
#define TEST_FILL(type) \ if (_##type##_fill != NULL) { \ union { \ double d; \
type body[96]; \
} s; \
\ int v = 32; \ for (int offset = -2; offset <= 2; offset++) { \ for (int i = 0; i < 96; i++) { \
s.body[i] = 1; \
} \
type* start = s.body + 8 + offset; \ for (int aligned = 0; aligned < 2; aligned++) { \ if (aligned) { \ if (((intptr_t)start) % HeapWordSize == 0) { \
((void (*)(type*, int, int))StubRoutines::_arrayof_##type##_fill)(start, v, 80); \
} else { \ continue; \
} \
} else { \
((void (*)(type*, int, int))StubRoutines::_##type##_fill)(start, v, 80); \
} \ for (int i = 0; i < 96; i++) { \ if (i < (8 + offset) || i >= (88 + offset)) { \
assert(s.body[i] == 1, "what?"); \
} else { \
assert(s.body[i] == 32, "what?"); \
} \
} \
} \
} \
} \
// NOTE(review): the matching "#define TEST_COPYRTN(type)" and its #undef
// are not visible in this chunk; verify in the full file. Upstream these
// run inside initialize2() under #ifdef ASSERT, followed by the
// function's closing brace, which is also missing from this view.
// Make sure all the copy runtime routines properly handle zero count
TEST_COPYRTN(jbyte);
TEST_COPYRTN(jshort);
TEST_COPYRTN(jint);
TEST_COPYRTN(jlong);
switch (t) { case T_BYTE: case T_BOOLEAN: if (!aligned) RETURN_STUB(jbyte_fill);
RETURN_STUB(arrayof_jbyte_fill); case T_CHAR: case T_SHORT: if (!aligned) RETURN_STUB(jshort_fill);
RETURN_STUB(arrayof_jshort_fill); case T_INT: case T_FLOAT: if (!aligned) RETURN_STUB(jint_fill);
RETURN_STUB(arrayof_jint_fill); case T_DOUBLE: case T_LONG: case T_ARRAY: case T_OBJECT: case T_NARROWOOP: case T_NARROWKLASS: case T_ADDRESS: case T_VOID: // Currently unsupported return NULL;
default:
ShouldNotReachHere(); return NULL;
}
#undef RETURN_STUB
}
// constants for computing the copy function
// Fix: a collapsed newline had pulled "enum {" into the comment above,
// leaving the enumerators as stray top-level tokens.
// ALIGNED and DISJOINT are independent flag bits that sum into a
// 4-way selector (0..3).
enum {
  COPYFUNC_UNALIGNED = 0,
  COPYFUNC_ALIGNED   = 1, // src, dest aligned to HeapWordSize
  COPYFUNC_CONJOINT  = 0,
  COPYFUNC_DISJOINT  = 2  // src != dest, or transfer can descend
};
// Note: The condition "disjoint" applies also for overlapping copies
// where an descending copy is permitted (i.e., dest_offset <= src_offset).
// NOTE(review): only the opening of this function is visible in this
// chunk; its body (stub selection by 'selector' and type 't') continues
// beyond this view, so the fragment intentionally leaves the function open.
// Fix: "constchar" was fused into a single (invalid) token.
address
StubRoutines::select_arraycopy_function(BasicType t, bool aligned, bool disjoint, const char* &name, bool dest_uninitialized) {
  // Combine the two independent flag bits into a 4-way selector (0..3).
  int selector =
    (aligned ? COPYFUNC_ALIGNED : COPYFUNC_UNALIGNED) +
    (disjoint ? COPYFUNC_DISJOINT : COPYFUNC_CONJOINT);
// On destruction, closes the marked unsafe-copy-memory region: records
// the current assembler pc as the region's end, and — if no explicit
// error-exit pc was set — uses that same pc as the error-exit
// continuation. A NULL entry means there is nothing to close.
UnsafeCopyMemoryMark::~UnsafeCopyMemoryMark() {
  if (_ucm_entry == NULL) {
    return;
  }
  address current_pc = _cgen->assembler()->pc();
  _ucm_entry->set_end_pc(current_pc);
  if (_ucm_entry->error_exit_pc() == NULL) {
    _ucm_entry->set_error_exit_pc(current_pc);
  }
}
¤ Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.0.22Bemerkung:
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung ist noch experimentell.