/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
// Last one in the slice.
BufferOffset off5 = ab.putInt(1000021);
CHECK_EQUAL(off5.getOffset(), 16);
// Five 4-byte words have been emitted, so the buffer is 20 bytes and the
// next instruction would land at offset 20.
CHECK_EQUAL(ab.size(), 20u);
CHECK_EQUAL(ab.nextOffset().getOffset(), 20);
// A freshly constructed deadline set is empty and has allocated nothing.
CHECK(dls.empty());
CHECK(alloc.isEmpty()); // Constructor must be infallible.
CHECK_EQUAL(dls.size(), 0u);
CHECK_EQUAL(dls.maxRangeSize(), 0u);
// Removing a non-existent deadline is OK (silently ignored).
dls.removeDeadline(1, BufferOffset(7));
// Add deadlines in increasing order as intended. This is optimal.
dls.addDeadline(1, BufferOffset(10));
CHECK(!dls.empty());
CHECK_EQUAL(dls.size(), 1u);
CHECK_EQUAL(dls.maxRangeSize(), 1u);
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
CHECK_EQUAL(dls.earliestDeadlineRange(), 1u);
// Removing a non-existent deadline is OK: wrong offset, or right offset in
// the wrong range, must leave the set untouched.
dls.removeDeadline(1, BufferOffset(7));
dls.removeDeadline(1, BufferOffset(17));
dls.removeDeadline(0, BufferOffset(10));
CHECK_EQUAL(dls.size(), 1u);
CHECK_EQUAL(dls.maxRangeSize(), 1u);
// Two identical deadlines for different ranges.
dls.addDeadline(2, BufferOffset(10));
CHECK(!dls.empty());
CHECK_EQUAL(dls.size(), 2u);
CHECK_EQUAL(dls.maxRangeSize(), 1u);
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
// It doesn't matter which range earliestDeadlineRange() reports first, // but it must report both. if (dls.earliestDeadlineRange() == 1) {
dls.removeDeadline(1, BufferOffset(10));
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
CHECK_EQUAL(dls.earliestDeadlineRange(), 2u);
} else {
CHECK_EQUAL(dls.earliestDeadlineRange(), 2u);
dls.removeDeadline(2, BufferOffset(10));
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
CHECK_EQUAL(dls.earliestDeadlineRange(), 1u);
}
// Add deadline which is the front of range 0, but not the global earliest.
dls.addDeadline(0, BufferOffset(20));
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
CHECK(dls.earliestDeadlineRange() > 0);
// Non-optimal add to front of single-entry range 0: offset 15 sorts before
// the existing 20.
dls.addDeadline(0, BufferOffset(15));
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
CHECK(dls.earliestDeadlineRange() > 0);
// Append to 2-entry range 0.
dls.addDeadline(0, BufferOffset(30));
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
CHECK(dls.earliestDeadlineRange() > 0);
// Prepend, stealing earliest from other range: 5 now beats the 10 held by
// the surviving non-zero range.
dls.addDeadline(0, BufferOffset(5));
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 5);
CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);
// Remove central element; front of range 0 (5) is unaffected.
dls.removeDeadline(0, BufferOffset(20));
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 5);
CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);
// Remove front, giving back the lead to the other range's deadline 10.
dls.removeDeadline(0, BufferOffset(5));
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 10);
CHECK(dls.earliestDeadlineRange() > 0);
// Remove front, giving back earliest to range 0 (its remaining 15).
dls.removeDeadline(dls.earliestDeadlineRange(), BufferOffset(10));
CHECK_EQUAL(dls.earliestDeadline().getOffset(), 15);
CHECK_EQUAL(dls.earliestDeadlineRange(), 0u);
// Each slice holds 5 instructions. Trigger a constant pool inside the slice.
uint32_t poolLoad[] = {0xc0cc0000};
uint32_t poolData[] = {0xdddd0000, 0xdddd0001, 0xdddd0002, 0xdddd0003};
AsmBufWithPool::PoolEntry pe;
BufferOffset load =
    ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
CHECK_EQUAL(pe.index(), 0u);
CHECK_EQUAL(load.getOffset(), 0);
// Pool hasn't been emitted yet. Load has been patched by
// InsertIndexIntoTag.
CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000);
// Expected layout:
//
//  0: load [pc+16]
//  4: 0x22220001
//  8: guard branch pc+12
// 12: pool header
// 16: poolData
// 20: 0x22220002
//
ab.putInt(0x22220001);
// One could argue that the pool should be flushed here since there is no
// more room. However, the current implementation doesn't dump pool until
// asked to add data:
ab.putInt(0x22220002);
// allocEntry() overwrites the load instruction! Restore the original.
poolLoad[0] = 0xc0cc0000;
// Now try with load and pool data on separate slices.
load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
CHECK_EQUAL(pe.index(), 1u); // Global pool entry index.
CHECK_EQUAL(load.getOffset(), 24);
CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000); // Index into current pool.
ab.putInt(0x22220001);
ab.putInt(0x22220002);
// The first pool has been emitted into the buffer; verify its layout.
CHECK_EQUAL(*ab.getInst(BufferOffset(24)), 0xc2cc0010u); // Patched load.
CHECK_EQUAL(*ab.getInst(BufferOffset(28)), 0x22220001u);
CHECK_EQUAL(*ab.getInst(BufferOffset(32)), 0xb0bb000cu); // Guard branch.
CHECK_EQUAL(*ab.getInst(BufferOffset(36)), 0xffff0004u); // Pool header.
CHECK_EQUAL(*ab.getInst(BufferOffset(40)), 0xdddd0000u); // Pool data.
CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 0x22220002u);
// Two adjacent loads to the same pool.
poolLoad[0] = 0xc0cc0000;
load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
CHECK_EQUAL(pe.index(), 2u); // Global pool entry index.
CHECK_EQUAL(load.getOffset(), 48);
CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000); // Index into current pool.
poolLoad[0] = 0xc0cc0000;
load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)(poolData + 1), &pe);
CHECK_EQUAL(pe.index(), 3u); // Global pool entry index.
CHECK_EQUAL(load.getOffset(), 52);
CHECK_EQUAL(*ab.getInst(load), 0xc1cc0001); // Index into current pool.
// Two loads as above, but the first load has an 8-byte pool entry, and the
// second load wouldn't be able to reach its data. This must produce two
// pools.
poolLoad[0] = 0xc0cc0000;
load = ab.allocEntry(1, 2, (uint8_t*)poolLoad, (uint8_t*)(poolData + 2), &pe);
CHECK_EQUAL(pe.index(), 4u); // Global pool entry index.
CHECK_EQUAL(load.getOffset(), 76);
CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000); // Index into current pool.
poolLoad[0] = 0xc0cc0000;
load = ab.allocEntry(1, 1, (uint8_t*)poolLoad, (uint8_t*)poolData, &pe);
CHECK_EQUAL(pe.index(),
            6u); // Global pool entry index. (Prev one is two indexes).
CHECK_EQUAL(load.getOffset(), 96);
CHECK_EQUAL(*ab.getInst(load), 0xc1cc0000); // Index into current pool.
// Second pool is not flushed yet, and there is room for one instruction
// after the load. Test the keep-together feature.
ab.enterNoPool(2);
ab.putInt(0x22220006);
ab.putInt(0x22220007);
ab.leaveNoPool();
// Second short-range branch that will be swiped up by hysteresis.
BufferOffset br2 = ab.putInt(0xb1bb0d2d);
ab.registerBranchDeadline(
    1, BufferOffset(br2.getOffset() + TestAssembler::BranchRange));
// Branch should not have been patched yet here.
CHECK_EQUAL(*ab.getInst(br1), 0xb1bb00cc);
CHECK_EQUAL(*ab.getInst(br2), 0xb1bb0d2d);
// Cancel one of the pending branches.
// This is what will happen to most branches as they are bound before
// expiring by Assembler::bind().
ab.unregisterBranchDeadline(
    1, BufferOffset(off.getOffset() + TestAssembler::BranchRange));
off = ab.putInt(0x22220006);
// Here we may or may not have patched the branch yet, but it is inevitable
// now:
//
//  0: br1 pc+36
//  4: 0x22220001
//  8: 0x22220002 (unpatched)
// 12: 0x22220003
// 16: 0x22220004
// 20: br2 pc+20
// 24: 0x22220006
CHECK_EQUAL(off.getOffset(), 24);
// 28: guard branch pc+16
// 32: pool header
// 36: veneer1
// 40: veneer2
// 44: 0x22220007
off = ab.putInt(0x22220007);
CHECK_EQUAL(off.getOffset(), 44);
// Now the branch must have been patched.
CHECK_EQUAL(*ab.getInst(br1), 0xb3bb0000 + 36); // br1 pc+36 (patched)
CHECK_EQUAL(*ab.getInst(BufferOffset(8)),
            0x22220002u); // 0x22220002 (unpatched)
CHECK_EQUAL(*ab.getInst(br2), 0xb3bb0000 + 20); // br2 pc+20 (patched)
CHECK_EQUAL(*ab.getInst(BufferOffset(28)), 0xb0bb0010u); // br pc+16 (guard)
CHECK_EQUAL(*ab.getInst(BufferOffset(32)),
            0xffff0000u); // pool header 0 bytes.
CHECK_EQUAL(*ab.getInst(BufferOffset(36)),
            0xb2bb00ccu); // veneer1 w/ original 'cc' offset.
CHECK_EQUAL(*ab.getInst(BufferOffset(40)),
            0xb2bb0d2du); // veneer2 w/ original 'd2d' offset.
CHECK_EQUAL(*ab.getInst(BufferOffset(44)), 0x22220007u);
// Branches can reach the label, but the linked list of uses needs to be // rearranged. The final conditional branch cannot reach the first branch.
Label lab2a;
Label lab2b;
masm.bind(&lab2a);
masm.B(&lab2b); // Generate 1,100,000 bytes of NOPs. for (unsigned n = 0; n < 1100000; n += 4) {
masm.Nop();
}
masm.branch(Assembler::LessThan, &lab2b);
masm.bind(&lab2b);
CHECK_EQUAL(
masm.getInstructionAt(BufferOffset(lab2a.offset()))->InstructionBits(),
vixl::B | vixl::Assembler::ImmUncondBranch(1100000 / 4 + 2));
CHECK_EQUAL(masm.getInstructionAt(BufferOffset(lab2b.offset() - 4))
->InstructionBits(),
vixl::B_cond | vixl::Assembler::ImmCondBranch(1) | vixl::lt);
// NOTE(review): The following German website-disclaimer text is extraneous
// residue from a web page, not part of this test file; left here (translated
// to English, commented out) pending removal. Original meaning: "The
// information on this website has been carefully compiled to the best of our
// knowledge. However, no guarantee is given of the completeness, correctness,
// or quality of the information provided. Note: the colored syntax
// highlighting is still experimental."