// Copyright 2015, ARM Limited // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of ARM Limited nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// Unbound Label Representation. // // We can have multiple branches using the same label before it is bound. // Assembler::bind() must then be able to enumerate all the branches and patch // them to target the final label location. // // When a Label is unbound with uses, its offset is pointing to the tip of a // linked list of uses. The uses can be branches or adr/adrp instructions. In // the case of branches, the next member in the linked list is simply encoded // as the branch target. For adr/adrp, the relative pc offset is encoded in the // immediate field as a signed instruction offset. // // In both cases, the end of the list is encoded as a 0 pc offset, i.e. the // tail is pointing to itself.
// A pc-relative offset of 0 marks the end of an unbound label's use list:
// the tail of the list points at itself.
static const ptrdiff_t kEndOfLabelUseList = 0;
// Follow one link in an unbound label's use list.
//
// Returns the buffer offset of the next use after `cur`, or an unassigned
// BufferOffset when `cur` is the tail of the list.
BufferOffset
MozBaseAssembler::NextLink(BufferOffset cur)
{
  // Raw encoded offset.
  Instruction* link = getInstructionAt(cur);
  ptrdiff_t offset = link->ImmPCRawOffset();

  // End of the list is encoded as 0: the tail points to itself.
  if (offset == kEndOfLabelUseList)
    return BufferOffset();

  // The encoded offset is the number of instructions to move, so scale it by
  // the instruction size to produce a byte offset into the buffer.
  return BufferOffset(cur.getOffset() + offset * kInstructionSize);
}
// A common implementation for the LinkAndGet<Type>OffsetTo helpers.
//
// If the label is bound, returns the offset as a multiple of 1 << elementShift.
// Otherwise, links the instruction to the label and returns the raw offset to
// encode. (This will be an instruction count.)
//
// The offset is calculated by aligning the PC and label addresses down to a
// multiple of 1 << elementShift, then calculating the (scaled) offset between
// them. This matches the semantics of adrp, for example. (Assuming that the
// assembler buffer is page-aligned, which it probably isn't.)
//
// For an unbound label, the returned offset will be encodable in the provided
// branch range. If the label is already bound, the caller is expected to make
// sure that it is in range, and emit the necessary branch instructions if it
// isn't.
ptrdiff_t
MozBaseAssembler::LinkAndGetOffsetTo(BufferOffset branch, ImmBranchRangeType branchRange,
                                     unsigned elementShift, Label* label)
{
  if (armbuffer_.oom())
    return kEndOfLabelUseList;

  if (label->bound()) {
    // The label is bound: all uses are already linked.
    ptrdiff_t branch_offset = ptrdiff_t(branch.getOffset() >> elementShift);
    ptrdiff_t label_offset = ptrdiff_t(label->offset() >> elementShift);
    return label_offset - branch_offset;
  }

  // Keep track of short-range branches targeting unbound labels. We may need
  // to insert veneers in PatchShortRangeBranchToVeneer() below.
  if (branchRange < NumShortBranchRangeTypes) {
    // This is the last possible branch target.
    BufferOffset deadline(branch.getOffset() +
                          Instruction::ImmBranchMaxForwardOffset(branchRange));
    armbuffer_.registerBranchDeadline(branchRange, deadline);
  }

  // The label is unbound and previously unused: store the offset in the label
  // itself for patching by bind().
  if (!label->used()) {
    label->use(branch.getOffset());
    return kEndOfLabelUseList;
  }

  // The label is unbound and has multiple users. Create a linked list between
  // the branches, and update the linked list head in the label struct. This is
  // not always trivial since the branches in the linked list have limited
  // ranges.

  // What is the earliest buffer offset that would be reachable by the branch
  // we're about to add?
  ptrdiff_t earliestReachable =
    branch.getOffset() + Instruction::ImmBranchMinBackwardOffset(branchRange);

  // If the existing instruction at the head of the list is within reach of the
  // new branch, we can simply insert the new branch at the front of the list.
  if (label->offset() >= earliestReachable) {
    ptrdiff_t offset = EncodeOffset(branch, BufferOffset(label));
    label->use(branch.getOffset());
    MOZ_ASSERT(offset != kEndOfLabelUseList);
    return offset;
  }

  // The label already has a linked list of uses, but we can't reach the head
  // of the list with the allowed branch range. Insert this branch at a
  // different position in the list.
  //
  // Find an existing branch, exbr, such that:
  //
  //   1. The new branch can be reached by exbr, and either
  //   2a. The new branch can reach exbr's target, or
  //   2b. The exbr branch is at the end of the list.
  //
  // Then the new branch can be inserted after exbr in the linked list.
  //
  // We know that it is always possible to find an exbr branch satisfying these
  // conditions because of the PatchShortRangeBranchToVeneer() mechanism. All
  // branches are guaranteed to either be able to reach the end of the
  // assembler buffer, or they will be pointing to an unconditional branch that
  // can.
  //
  // In particular, the end of the list is always a viable candidate, so we'll
  // just get that.
  BufferOffset next(label);
  BufferOffset exbr;
  do {
    exbr = next;
    next = NextLink(next);
  } while (next.assigned());
  SetNextLink(exbr, branch);

  // This branch becomes the new end of the list.
  return kEndOfLabelUseList;
}
// NOTE(review): this span is the tail of a larger emitter whose signature lies
// outside this chunk — it references rd, rn, op, immediate, operand, and
// reg_size declared there (presumably VIXL's logical-operation emitter;
// confirm against the full file). Line-joining in the source had swallowed the
// `if` statements into comments; the executable code is restored below.

// If the operation is NOT, invert the operation and immediate.
if ((op & NOT) == NOT) {
  op = static_cast<LogicalOp>(op & ~NOT);
  // 64-bit registers take the plain bitwise NOT; 32-bit (W) registers also
  // mask the result down to the W register width.
  immediate = rd.Is64Bits() ? ~immediate : (~immediate & kWRegMask);
}

unsigned n, imm_s, imm_r;
if (IsImmLogical(immediate, reg_size, &n, &imm_s, &imm_r)) {
  // Immediate can be encoded in the instruction.
  return LogicalImmediate(rd, rn, n, imm_s, imm_r, op);
} else {
  // This case is handled in the macro assembler.
  VIXL_UNREACHABLE();
}
} else {
  VIXL_ASSERT(operand.IsShiftedRegister());
  VIXL_ASSERT(operand.reg().size() == rd.size());
  Instr dp_op = static_cast<Instr>(op | LogicalShiftedFixed);
  return DataProcShiftedRegister(rd, rn, operand, LeaveFlags, dp_op);
}
}
// Store the js::jit::PoolEntry index into the literal load instruction at
// `load`. finishPool() will walk over all literal load instructions and use
// PatchConstantPoolLoad() to patch in the final relative offset.
void MozBaseAssembler::InsertIndexIntoTag(uint8_t* load, uint32_t index) {
  // OR the encoded index into the instruction's literal-immediate field.
  // (reinterpret_cast replaces the original C-style cast; same behavior.)
  *reinterpret_cast<uint32_t*>(load) |= Assembler::ImmLLiteral(index);
}
// The load currently contains the js::jit::PoolEntry's index, // as written by InsertIndexIntoTag().
uint32_t index = load->ImmLLiteral();
// Each entry in the literal pool is uint32_t-sized, // but literals may use multiple entries.
uint32_t* constPool = reinterpret_cast<uint32_t*>(constPoolAddr);
Instruction* source = reinterpret_cast<Instruction*>(&constPool[index]);
load->SetImmLLiteral(source); returnfalse; // Nothing uses the return value.
}
// Patch a short-range branch (identified by its range index and deadline) so
// that it points at a freshly emitted veneer, splicing the veneer into the
// linked list of uses of the same unbound label.
void
MozBaseAssembler::PatchShortRangeBranchToVeneer(ARMBuffer* buffer, unsigned rangeIdx,
                                                BufferOffset deadline, BufferOffset veneer)
{
  // Reconstruct the position of the branch from (rangeIdx, deadline).
  vixl::ImmBranchRangeType branchRange = static_cast<vixl::ImmBranchRangeType>(rangeIdx);
  BufferOffset branch(deadline.getOffset() - Instruction::ImmBranchMaxForwardOffset(branchRange));
  Instruction* branchInst = buffer->getInst(branch);
  Instruction* veneerInst = buffer->getInst(veneer);

  // Verify that the branch range matches what's encoded.
  MOZ_ASSERT(Instruction::ImmBranchTypeToRange(branchInst->BranchType()) == branchRange);

  // We want to insert the veneer after the branch in the linked list of
  // instructions that use the same unbound label.
  // The veneer should be an unconditional branch.
  ptrdiff_t nextElemOffset = branchInst->ImmPCRawOffset();

  // If the offset is 0, this is the end of the linked list. Otherwise, rebase
  // the instruction-count offset so that, once encoded in the veneer, it
  // still targets the same instruction that branchInst targeted.
  if (nextElemOffset != kEndOfLabelUseList) {
    nextElemOffset *= kInstructionSize;
    nextElemOffset += branch.getOffset() - veneer.getOffset();
    nextElemOffset /= kInstructionSize;
  }
  Assembler::b(veneerInst, nextElemOffset);

  // Now point branchInst at the veneer. See also SetNextLink() above.
  branchInst->SetImmPCRawOffset(EncodeOffset(branch, veneer));
}
// NOTE(review): this struct appears truncated by extraction — it never closes
// within this chunk, and line-joining has swallowed the `union { struct {`
// opening into the comment inside Header, so the bitfields and the aliasing
// `data` member no longer nest as intended (the second `uint32_t data;` would
// clash with the first). Reconstruct from the original file before compiling.
struct PoolHeader {
uint32_t data;
struct Header { // The size should take into account the pool header. // The size is in units of Instruction (4bytes), not byte. union { struct {
uint32_t size : 15;
// "Natural" guards are part of the normal instruction stream,
// while "non-natural" guards are inserted for the sole purpose
// of skipping around a pool.
uint32_t isNatural : 1;
uint32_t ONES : 16;
};
uint32_t data;
};
// NOTE(review): the joined comment below contains statements
// (totalPoolSize / totalPoolInstructions) from a different, out-of-view
// function; `p` is not declared anywhere in this scope.
// Get the total size of the pool. const uintptr_t totalPoolSize = sizeof(PoolHeader) + p->getPoolSize(); const uintptr_t totalPoolInstructions = totalPoolSize / kInstructionSize;
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.