/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- * vim: set ts=8 sts=2 et sw=2 tw=80: * This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
using mozilla::CountLeadingZeroes32; using mozilla::DebugOnly;
using LabelDoc = DisassemblerSpew::LabelDoc; using LiteralDoc = DisassemblerSpew::LiteralDoc;
// Intentionally empty; presumably exists as a convenient symbol on which to
// set a debugger breakpoint — TODO confirm against callers.
void dbg_break() {}
// The ABIArgGenerator is used for making system ABI calls and for inter-wasm
// calls. The system ABI can either be SoftFp or HardFp, and inter-wasm calls
// are always HardFp calls. The initialization defaults to HardFp, and the ABI
// choice is made before any system ABI calls with the method "setUseHardFp".
// Start with no argument registers or stack space consumed. useHardFp_
// defaults to true; system-ABI callers override it before generating args.
ABIArgGenerator::ABIArgGenerator()
    : intRegIndex_(0),    // next integer argument register to hand out
      floatRegIndex_(0),  // next VFP argument register to hand out
      stackOffset_(0),    // bytes of stack used for arguments so far
      current_(),         // most recently generated ABIArg
      useHardFp_(true) {}
// See the "Parameter Passing" section of the "Procedure Call Standard for the // ARM Architecture" documentation.
// Allocate the next argument location under the SoftFp ABI: floating-point
// arguments travel in the general-purpose registers (or on the stack), never
// in VFP registers. Fixes extraction damage in this copy: the fused
// "staticconst" tokens did not compile.
ABIArg ABIArgGenerator::softNext(MIRType type) {
  switch (type) {
    case MIRType::Int32:
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData:
    case MIRType::StackResults:
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Int64:
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(uint64_t) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    case MIRType::Float32:
      // SoftFp: single-precision values use an integer register.
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Double:
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(double) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(double);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }
  return current_;
}
// Allocate the next argument location under the HardFp ABI: floating-point
// arguments travel in VFP registers (singles individually, doubles in even
// register pairs). Fixes extraction damage in this copy: the function was
// truncated (missing "return current_;" and its closing brace) and contained
// fused "staticconst" tokens.
ABIArg ABIArgGenerator::hardNext(MIRType type) {
  switch (type) {
    case MIRType::Int32:
    case MIRType::Pointer:
    case MIRType::WasmAnyRef:
    case MIRType::WasmArrayData:
    case MIRType::StackResults:
      if (intRegIndex_ == NumIntArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_));
      intRegIndex_++;
      break;
    case MIRType::Int64:
      // Make sure to use an even register index. Increase to next even number
      // when odd.
      intRegIndex_ = (intRegIndex_ + 1) & ~1;
      if (intRegIndex_ == NumIntArgRegs) {
        // Align the stack on 8 bytes.
        static const uint32_t align = sizeof(uint64_t) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      current_ = ABIArg(Register::FromCode(intRegIndex_),
                        Register::FromCode(intRegIndex_ + 1));
      intRegIndex_ += 2;
      break;
    case MIRType::Float32:
      if (floatRegIndex_ == NumFloatArgRegs) {
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint32_t);
        break;
      }
      current_ = ABIArg(VFPRegister(floatRegIndex_, VFPRegister::Single));
      floatRegIndex_++;
      break;
    case MIRType::Double:
      // Double register are composed of 2 float registers, thus we have to
      // skip any float register which cannot be used in a pair of float
      // registers in which a double value can be stored.
      floatRegIndex_ = (floatRegIndex_ + 1) & ~1;
      if (floatRegIndex_ == NumFloatArgRegs) {
        static const uint32_t align = sizeof(double) - 1;
        stackOffset_ = (stackOffset_ + align) & ~align;
        current_ = ABIArg(stackOffset_);
        stackOffset_ += sizeof(uint64_t);
        break;
      }
      current_ = ABIArg(VFPRegister(floatRegIndex_ >> 1, VFPRegister::Double));
      floatRegIndex_ += 2;
      break;
    default:
      MOZ_CRASH("Unexpected argument type");
  }
  return current_;
}
// Encode a standard register when it is being used as src1, the dest, and an // extra register. These should never be called with an InvalidReg.
// Encode |r| into the Rt field (bits 15:12) of an instruction word.
uint32_t js::jit::RT(Register r) {
  const uint32_t code = r.code();
  MOZ_ASSERT((code & ~0xf) == 0);
  return code << 12;
}
// Encode a standard register when it is being used as src1, the dest, and an
// extra register. For these, an InvalidReg is used to indicate an optional
// register that has been omitted.
// Encode |r| into the Rt field, or 0 when the optional register was omitted
// (signalled by InvalidReg). Restores the tail of the function, which was
// truncated in this copy of the file.
uint32_t js::jit::maybeRT(Register r) {
  if (r == InvalidReg) {
    return 0;
  }
  return RT(r);
}
bool Assembler::reserve(size_t size) { // This buffer uses fixed-size chunks so there's no point in reserving // now vs. on-demand. return !oom();
}
// Copy the assembled code into |bytes|. For now, specialize to the one use
// case: as long as wasm::Bytes is a Vector rather than a linked list of
// chunks, there's not much we can do other than copy. Fixes the fused
// "returnfalse"/"returntrue" tokens in this copy, which did not compile.
bool Assembler::swapBuffer(wasm::Bytes& bytes) {
  MOZ_ASSERT(bytes.empty());
  if (!bytes.resize(bytesNeeded())) {
    return false;
  }
  m_buffer.executableCopy(bytes.begin());
  return true;
}
// NOTE(review): truncated excerpt. The header of the enclosing function is
// missing from this copy; the code below appears to recover a branch target
// from either a direct "b #offset" or a movw/movt pair feeding bx — confirm
// against the complete file before editing.
if (inst1->is<InstBranchImm>()) { // See if we have a simple case, b #offset.
BOffImm imm;
InstBranchImm* jumpB = inst1->as<InstBranchImm>();
jumpB->extractImm(&imm); return imm.getDest(inst1)->raw();
}
if (inst1->is<InstMovW>()) { // See if we have the complex case: // movw r_temp, #imm1 // movt r_temp, #imm2 // bx r_temp // OR // movw r_temp, #imm1 // movt r_temp, #imm2 // str pc, [sp] // bx r_temp
Imm16 targ_bot;
Imm16 targ_top; Register temp;
// Extract both the temp register and the bottom immediate.
InstMovW* bottom = inst1->as<InstMovW>();
bottom->extractImm(&targ_bot);
bottom->extractDest(&temp);
// Extract the top part of the immediate.
Instruction* inst2 = iter->next();
MOZ_ASSERT(inst2->is<InstMovT>());
InstMovT* top = inst2->as<InstMovT>();
top->extractImm(&targ_top);
// Make sure they are being loaded into the same register.
MOZ_ASSERT(top->checkDest(temp));
// NOTE(review): the "#ifdef DEBUG" below has been swallowed into a comment
// by the extraction; in the original it is a preprocessor directive guarding
// the assertions that follow.
// Make sure we're branching to the same register. #ifdef DEBUG // A toggled call sometimes has a NOP instead of a branch for the third // instruction. No way to assert that it's valid in that situation.
Instruction* inst3 = iter->next(); if (!inst3->is<InstNOP>()) {
InstBranchReg* realBranch = nullptr; if (inst3->is<InstBranchReg>()) {
realBranch = inst3->as<InstBranchReg>();
} else {
Instruction* inst4 = iter->next();
realBranch = inst4->as<InstBranchReg>();
}
MOZ_ASSERT(realBranch->checkDest(temp));
} #endif
// NOTE(review): truncated excerpt. The enclosing function header is missing
// from this copy; the code below appears to classify a movw/movt load pair
// and extract its immediate halves — confirm against the complete file
// before editing.
if (load1->is<InstMovW>() && load2->is<InstMovT>()) { if (style) {
*style = L_MOVWT;
}
// See if we have the complex case: // movw r_temp, #imm1 // movt r_temp, #imm2
Imm16 targ_bot;
Imm16 targ_top; Register temp;
// Extract both the temp register and the bottom immediate.
InstMovW* bottom = load1->as<InstMovW>();
bottom->extractImm(&targ_bot);
bottom->extractDest(&temp);
// Extract the top part of the immediate.
InstMovT* top = load2->as<InstMovT>();
top->extractImm(&targ_top);
// Make sure they are being loaded into the same register.
MOZ_ASSERT(top->checkDest(temp));
// Map a condition to its unsigned counterpart; equality conditions pass
// through unchanged.
Assembler::Condition Assembler::UnsignedCondition(Condition cond) {
  switch (cond) {
    case Zero:
    case NonZero:
      return cond;
    case LessThan:
    case Below:
      return Below;
    case LessThanOrEqual:
    case BelowOrEqual:
      return BelowOrEqual;
    case GreaterThan:
    case Above:
      return Above;
    case AboveOrEqual:
    case GreaterThanOrEqual:
      return AboveOrEqual;
    default:
      MOZ_CRASH("unexpected condition");
  }
}
// Strip the "or equal" part from an ordering condition, preserving its
// signedness and direction.
Assembler::Condition Assembler::ConditionWithoutEqual(Condition cond) {
  switch (cond) {
    case LessThan:
    case LessThanOrEqual:
      return LessThan;
    case Below:
    case BelowOrEqual:
      return Below;
    case GreaterThan:
    case GreaterThanOrEqual:
      return GreaterThan;
    case Above:
    case AboveOrEqual:
      return Above;
    default:
      MOZ_CRASH("unexpected condition");
  }
}
// Try to express |imm| as a pair of ARM imm8m operands (8-bit value rotated
// right by an even amount). Returns an invalid TwoImm8mData when no such
// pair exists. Fixes extraction damage in this copy: the declarations of
// |mid| and |no_n2| had been dropped, so the function did not compile.
Imm8::TwoImm8mData Imm8::EncodeTwoImms(uint32_t imm) {
  // In the ideal case, we are looking for a number that (in binary) looks
  // like:
  //   0b((00)*)n_1((00)*)n_2((00)*)
  //      left   n1  mid   n2
  // where both n_1 and n_2 fit into 8 bits.
  // Since this is being done with rotates, we also need to handle the case
  // that one of these numbers is in fact split between the left and right
  // sides, in which case the constant will look like:
  //   0bn_1a((00)*)n_2((00)*)n_1b
  //     n1a   mid  n2   rgh  n1b
  // Also remember, values are rotated by multiples of two, and left, mid or
  // right can have length zero.
  uint32_t imm1, imm2;
  int left = CountLeadingZeroes32(imm) & 0x1E;
  uint32_t no_n1 = imm & ~(0xff << (24 - left));

  // Not technically needed: this case only happens if we can encode as a
  // single imm8m. There is a perfectly reasonable encoding in this case, but
  // we shouldn't encourage people to do things like this.
  if (no_n1 == 0) {
    return TwoImm8mData();
  }

  // Strip the second 8-bit run as well; what survives must be zero (easy
  // case) or wrap around the word boundary (handled below).
  int mid = CountLeadingZeroes32(no_n1) & 0x1E;
  uint32_t no_n2 =
      no_n1 & ~((0xff << ((24 - mid) & 0x1f)) | 0xff >> ((8 + mid) & 0x1f));

  if (no_n2 == 0) {
    // We hit the easy case, no wraparound.
    // Note: a single constant *may* look like this.
    int imm1shift = left + 8;
    int imm2shift = mid + 8;
    imm1 = (imm >> (32 - imm1shift)) & 0xff;
    if (imm2shift >= 32) {
      imm2shift = 0;
      // This assert does not always hold, in fact, this would lead to
      // some incredibly subtle bugs.
      // assert((imm & 0xff) == no_n1);
      imm2 = no_n1;
    } else {
      imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
      MOZ_ASSERT(((no_n1 >> (32 - imm2shift)) | (no_n1 << imm2shift)) == imm2);
    }
    MOZ_ASSERT((imm1shift & 0x1) == 0);
    MOZ_ASSERT((imm2shift & 0x1) == 0);
    return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                        datastore::Imm8mData(imm2, imm2shift >> 1));
  }

  // Either it wraps, or it does not fit. If we initially chopped off more
  // than 8 bits, then it won't fit.
  if (left >= 8) {
    return TwoImm8mData();
  }

  int right = 32 - (CountLeadingZeroes32(no_n2) & 30);
  // All remaining set bits *must* fit into the lower 8 bits.
  // The right == 8 case should be handled by the previous case.
  if (right > 8) {
    return TwoImm8mData();
  }

  // Make sure the initial bits that we removed for no_n1 fit into the
  // 8-(32-right) leftmost bits.
  if (((imm & (0xff << (24 - left))) << (8 - right)) != 0) {
    // BUT we may have removed more bits than we needed to for no_n1
    // 0x04104001 e.g. we can encode 0x104 with a single op, then 0x04000001
    // with a second, but we try to encode 0x0410000 and find that we need a
    // second op for 0x4000, and 0x1 cannot be included in the encoding of
    // 0x04100000.
    no_n1 = imm & ~((0xff >> (8 - right)) | (0xff << (24 + right)));
    mid = CountLeadingZeroes32(no_n1) & 30;
    no_n2 = no_n1 & ~((0xff << ((24 - mid) & 31)) | 0xff >> ((8 + mid) & 31));
    if (no_n2 != 0) {
      return TwoImm8mData();
    }
  }

  // Now assemble all of this information into a two coherent constants it is
  // a rotate right from the lower 8 bits.
  int imm1shift = 8 - right;
  imm1 = 0xff & ((imm << imm1shift) | (imm >> (32 - imm1shift)));
  MOZ_ASSERT((imm1shift & ~0x1e) == 0);
  // left + 8 + mid is the position of the leftmost bit of n_2.
  // We needed to rotate 0x000000ab right by 8 in order to get 0xab000000,
  // then shift again by the leftmost bit in order to get the constant that we
  // care about.
  int imm2shift = mid + 8;
  imm2 = ((imm >> (32 - imm2shift)) | (imm << imm2shift)) & 0xff;
  MOZ_ASSERT((imm1shift & 0x1) == 0);
  MOZ_ASSERT((imm2shift & 0x1) == 0);
  return TwoImm8mData(datastore::Imm8mData(imm1, imm1shift >> 1),
                      datastore::Imm8mData(imm2, imm2shift >> 1));
}
// Find an alternate ALUOp that computes the same result with the negated or
// inverted immediate, writing the adjusted immediate through |imm| and the
// register to target through |negDest|. Returns OpInvalid when there is no
// such alternative.
ALUOp jit::ALUNeg(ALUOp op, Register dest, Register scratch, Imm32* imm,
                  Register* negDest) {
  *negDest = dest;
  switch (op) {
    case OpMov:
      *imm = Imm32(~imm->value);
      return OpMvn;
    case OpMvn:
      *imm = Imm32(~imm->value);
      return OpMov;
    case OpAnd:
      *imm = Imm32(~imm->value);
      return OpBic;
    case OpBic:
      *imm = Imm32(~imm->value);
      return OpAnd;
    case OpAdd:
      *imm = Imm32(-imm->value);
      return OpSub;
    case OpSub:
      *imm = Imm32(-imm->value);
      return OpAdd;
    case OpCmp:
      *imm = Imm32(-imm->value);
      return OpCmn;
    case OpCmn:
      *imm = Imm32(-imm->value);
      return OpCmp;
    case OpTst:
      MOZ_ASSERT(dest == InvalidReg);
      *imm = Imm32(~imm->value);
      *negDest = scratch;
      return OpBic;
    // orr has orn on thumb2 only.
    default:
      return OpInvalid;
  }
}
// Some instructions can't be processed as two separate instructions such as
// and, and possibly add (when we're setting ccodes). There is also some
// hilarity with *reading* condition codes. For example, adc dest, src1,
// 0xfff; (add with carry) can be split up into adc dest, src1, 0xf00; add
// dest, dest, 0xff, since "reading" the condition code increments the
// result by one conditionally, that only needs to be done on one of the two
// instructions.
// Fixes the fused "returntrue"/"returnfalse" tokens in this copy, which did
// not compile.
bool jit::can_dbl(ALUOp op) {
  switch (op) {
    case OpBic:
    case OpAdd:
    case OpSub:
    case OpEor:
    case OpOrr:
      return true;
    default:
      return false;
  }
}
// Even when we are setting condition codes, sometimes we can get away with
// splitting an operation into two. For example, if our immediate is
// 0x00ff00ff, and the operation is eors we can split this in half, since
// x ^ 0x00ff0000 ^ 0x000000ff should set all of its condition codes exactly
// the same as x ^ 0x00ff00ff. However, if the operation were adds, we
// cannot split this in half. If the source on the add is 0xfff00ff0, the
// result should be 0xef10ef, but do we set the overflow bit or not?
// Depending on which half is performed first (0x00ff0000 or 0x000000ff) the
// V bit will be set differently, and *not* updating the V bit would be
// wrong. Theoretically, the following should work:
//   adds r0, r1, 0x00ff0000;
//   addsvs r0, r1, 0x000000ff;
//   addvc r0, r1, 0x000000ff;
// But this is 3 instructions, and at that point, we might as well use
// something else.
// Fixes the fused "returntrue"/"returnfalse" tokens in this copy, which did
// not compile.
bool jit::condsAreSafe(ALUOp op) {
  switch (op) {
    case OpBic:
    case OpOrr:
    case OpEor:
      return true;
    default:
      return false;
  }
}
// All of the compare operations are dest-less variants of a standard
// operation. Given the dest-less variant, return the dest-ful variant;
// anything else passes through unchanged.
ALUOp jit::getDestVariant(ALUOp op) {
  switch (op) {
    case OpCmp:
      return OpSub;
    case OpCmn:
      return OpAdd;
    case OpTst:
      return OpAnd;
    case OpTeq:
      return OpEor;
    default:
      return op;
  }
}
// Resolve the branch destination relative to |src|.
// TODO: It is probably worthwhile to verify that src is actually a branch.
// NOTE: This does not explicitly shift the offset of the destination left by
// 2, since it is indexing into an array of instruction sized objects.
Instruction* BOffImm::getDest(Instruction* src) const {
  // Sign-extend the 24-bit stored offset, then account for the pc-relative
  // bias of two instructions.
  const int32_t offset = (int32_t(data_) << 8) >> 8;
  return &src[offset + 2];
}
// Size of the instruction stream, in bytes, including pools. This function
// expects all pools that need to be placed have been placed. If they haven't
// been, then we need to go and flush the pools :(
size_t Assembler::size() const {
  return m_buffer.size();
}
// Size of the relocation table, in bytes.
// Bytes accumulated so far in the jump relocation table.
size_t Assembler::jumpRelocationTableBytes() const {
  return jumpRelocations_.length();
}
// Bytes accumulated so far in the data relocation table.
size_t Assembler::dataRelocationTableBytes() const {
  return dataRelocations_.length();
}
// Size of the data table, in bytes.
// Total output footprint: instruction stream plus both relocation tables.
size_t Assembler::bytesNeeded() const {
  const size_t code = size();
  const size_t relocs = jumpRelocationTableBytes() + dataRelocationTableBytes();
  return code + relocs;
}
// Allocate memory for a branch instruction, it will be overwritten // subsequently and should not be disassembled.
// Build the raw machine word for a movw: opcode | condition | imm16 | Rd.
static uint32_t EncodeMovW(Register dest, Imm16 imm, Assembler::Condition c) {
  MOZ_ASSERT(ARMFlags::HasMOVWT());
  const uint32_t opcode = 0x03000000;
  return opcode | c | imm.encode() | RD(dest);
}
// Build the raw machine word for a movt: opcode | condition | imm16 | Rd.
static uint32_t EncodeMovT(Register dest, Imm16 imm, Assembler::Condition c) {
  MOZ_ASSERT(ARMFlags::HasMOVWT());
  const uint32_t opcode = 0x03400000;
  return opcode | c | imm.encode() | RD(dest);
}
// Not quite ALU worthy, but these are useful nonetheless. These also have
// the issue of being formatted completely differently from the standard ALU
// operations.
// Emit a movw instruction loading the low 16 bits of a constant.
BufferOffset Assembler::as_movw(Register dest, Imm16 imm, Condition c) {
  const uint32_t inst = EncodeMovW(dest, imm, c);
  return writeInst(inst);
}
// NOTE(review): this class is truncated in this copy — the bit-field data
// members (loadType_, ONES, ExpectedOnes, and friends) and the init()/getter
// methods referenced elsewhere in the file are not visible here. Confirm
// against the complete file before editing.
class PoolHintData { public: enum LoadType { // Set 0 to bogus, since that is the value most likely to be // accidentally left somewhere.
PoolBOGUS = 0,
PoolDTR = 1,
PoolBranch = 2,
PoolVDTR = 3
};
LoadType getLoadType() const { // If this *was* a PoolBranch, but the branch has already been bound // then this isn't going to look like a real poolhintdata, but we still // want to lie about it so everyone knows it *used* to be a branch. if (ONES != ExpectedOnes) { return PoolHintData::PoolBranch;
} returnstatic_cast<LoadType>(loadType_);
}
bool isValidPoolHint() const { // Most instructions cannot have a condition that is 0xf. Notable // exceptions are blx and the entire NEON instruction set. For the // purposes of pool loads, and possibly patched branches, the possible // instructions are ldr and b, neither of which can have a condition // code of 0xf. return ONES == ExpectedOnes;
}
};
// Type pun: lets a PoolHintData be written into / read back out of the
// instruction stream as a single raw 32-bit word.
union PoolHintPun {
  PoolHintData phd;
  uint32_t raw;
};
// Handles all of the other integral data transferring functions: ldrsb, ldrsh, // ldrd, etc. The size is given in bits.
// Emit an extended data-transfer instruction (ldrsb, ldrsh, ldrd, strh,
// strd, ...). |size| is the access width in bits; signedness is only
// meaningful for loads.
BufferOffset Assembler::as_extdtr(LoadStore ls, int size, bool IsSigned,
                                  Index mode, Register rt, EDtrAddr addr,
                                  Condition c) {
  int extra_bits1 = 0;
  int extra_bits2 = 0;
  switch (size) {
    case 8:
      // Byte-sized ext transfers exist only as signed loads.
      MOZ_ASSERT(IsSigned);
      MOZ_ASSERT(ls != IsStore);
      extra_bits1 = 0x1;
      extra_bits2 = 0x2;
      break;
    case 16:
      // 'case 32' doesn't need to be handled, it is handled by the default
      // ldr/str.
      extra_bits2 = 0x01;
      extra_bits1 = (ls == IsStore) ? 0 : 1;
      if (IsSigned) {
        MOZ_ASSERT(ls != IsStore);
        extra_bits2 |= 0x2;
      }
      break;
    case 64:
      extra_bits2 = (ls == IsStore) ? 0x3 : 0x2;
      extra_bits1 = 0;
      break;
    default:
      MOZ_CRASH("unexpected size in as_extdtr");
  }
  return writeInst(extra_bits2 << 5 | extra_bits1 << 20 | 0x90 | addr.encode() |
                   RT(rt) | mode | c);
}
// Emit a load/store-multiple instruction; |mask| selects the register set.
BufferOffset Assembler::as_dtm(LoadStore ls, Register rn, uint32_t mask,
                               DTMMode mode, DTMWriteBack wb, Condition c) {
  const uint32_t opcode = 0x08000000;
  return writeInst(opcode | RN(rn) | ls | mode | mask | c | wb);
}
// Place a float literal in the constant pool and emit a load for it.
// Insert floats into the double pool as they have the same limitations on
// immediate offset. This wastes 4 bytes padding per float. An alternative
// would be to have a separate pool for floats.
BufferOffset Assembler::as_FImm32Pool(VFPRegister dest, float f, Condition c) {
  MOZ_ASSERT(dest.isSingle());
  PoolHintPun php;
  php.phd.init(0, c, PoolHintData::PoolVDTR, dest);
  return allocLiteralLoadEntry(1, 1, php, (uint8_t*)&f, LiteralDoc(f));
}
// patchConstantPoolLoad takes the address of the instruction that wants to be // patched, and the address of the start of the constant pool, and figures // things out from there. void Assembler::PatchConstantPoolLoad(void* loadAddr, void* constPoolAddr) {
PoolHintData data = *(PoolHintData*)loadAddr;
uint32_t* instAddr = (uint32_t*)loadAddr; int offset = (char*)constPoolAddr - (char*)loadAddr; switch (data.getLoadType()) { case PoolHintData::PoolBOGUS:
MOZ_CRASH("bogus load type!"); case PoolHintData::PoolDTR:
Assembler::as_dtr_patch(
IsLoad, 32, Offset, data.getReg(),
DTRAddr(pc, DtrOffImm(offset + 4 * data.getIndex() - 8)),
data.getCond(), instAddr); break; case PoolHintData::PoolBranch: // Either this used to be a poolBranch, and the label was already bound, // so it was replaced with a real branch, or this may happen in the // future. If this is going to happen in the future, then the actual // bits that are written here don't matter (except the condition code, // since that is always preserved across patchings) but if it does not // get bound later, then we want to make sure this is a load from the // pool entry (and the pool entry should be nullptr so it will crash). if (data.isValidPoolHint()) {
Assembler::as_dtr_patch(
IsLoad, 32, Offset, pc,
DTRAddr(pc, DtrOffImm(offset + 4 * data.getIndex() - 8)),
data.getCond(), instAddr);
} break; case PoolHintData::PoolVDTR: {
VFPRegister dest = data.getVFPReg();
int32_t imm = offset + (data.getIndex() * 4) - 8;
MOZ_ASSERT(-1024 < imm && imm < 1024);
Assembler::as_vdtr_patch(IsLoad, dest, VFPAddr(pc, VFPOffImm(imm)),
data.getCond(), instAddr); break;
}
}
}
// bx can *only* branch to a register, never to an immediate.
// Emit bx: an indirect branch through |r| (register-only; see comment above).
BufferOffset Assembler::as_bx(Register r, Condition c) {
  return writeInst(((int)c) | OpBx | r.code());
}
// Branch can branch to an immediate *or* to a register. // Branches to immediates are pc relative, branches to registers are absolute.
// Emit a pc-relative branch to |off|; |documentation| only feeds the
// disassembler spew.
BufferOffset Assembler::as_b(BOffImm off, Condition c, Label* documentation) {
  const LabelDoc doc = refLabel(documentation);
  return writeBranchInst(((int)c) | OpB | off.encode(), doc);
}
// Emit a branch to label |l|, linking into the label's use chain when it is
// not yet bound. Restores the bound-label path, which was truncated in this
// copy (the if-block was left unclosed and |ret| redeclared).
// NOTE(review): the JS_DISASM_ARM spew call present in upstream has been
// elided here — reinstate it from the complete file if disassembler spew is
// built.
BufferOffset Assembler::as_b(Label* l, Condition c) {
  if (l->bound()) {
    // Note only one instruction is emitted here, the NOP is overwritten.
    BufferOffset ret = allocBranchInst();
    if (oom()) {
      return BufferOffset();
    }
    as_b(BOffImm(l->offset() - ret.getOffset()), c, ret);
    return ret;
  }

  if (oom()) {
    return BufferOffset();
  }

  BufferOffset ret;
  if (l->used()) {
    int32_t old = l->offset();
    MOZ_RELEASE_ASSERT(BOffImm::IsInRange(old),
                       "Buffer size limit should prevent this");
    ret = as_b(BOffImm(old), c, l);
  } else {
    BOffImm inv;
    ret = as_b(inv, c, l);
  }

  if (oom()) {
    return BufferOffset();
  }

  l->use(ret.getOffset());
  return ret;
}
// Patch an existing instruction slot with a branch to |off|.
// JS_DISASM_ARM NOTE: Can't disassemble here, because numerous callers use
// this to patch up old code. Must disassemble in the caller where it makes
// sense. Not many callers.
BufferOffset Assembler::as_b(BOffImm off, Condition c, BufferOffset inst) {
  *editSrc(inst) = InstBImm(off, c);
  return inst;
}
// blx can go to either an immediate or a register. // When blx'ing to a register, we change processor state depending on the low // bit of the register when blx'ing to an immediate, we *always* change // processor state.
// bl can only branch to an pc-relative immediate offset // It cannot change the processor state.
// Emit a pc-relative branch-with-link to |off|; |documentation| only feeds
// the disassembler spew.
BufferOffset Assembler::as_bl(BOffImm off, Condition c, Label* documentation) {
  const LabelDoc doc = refLabel(documentation);
  return writeBranchInst(((int)c) | OpBl | off.encode(), doc);
}
// Emit a branch-with-link to label |l|, linking into the label's use chain
// when it is not yet bound. Restores the bound-label path and the function
// tail (use-chain update and return), both truncated in this copy.
BufferOffset Assembler::as_bl(Label* l, Condition c) {
  if (l->bound()) {
    // Note only one instruction is emitted here, the NOP is overwritten.
    BufferOffset ret = allocBranchInst();
    if (oom()) {
      return BufferOffset();
    }
    as_bl(BOffImm(l->offset() - ret.getOffset()), c, ret);
    return ret;
  }

  if (oom()) {
    return BufferOffset();
  }

  BufferOffset ret;
  // See if the list was empty.
  if (l->used()) {
    int32_t old = l->offset();
    MOZ_RELEASE_ASSERT(BOffImm::IsInRange(old),
                       "Buffer size limit should prevent this");
    ret = as_bl(BOffImm(old), c, l);
  } else {
    BOffImm inv;
    ret = as_bl(inv, c, l);
  }

  if (oom()) {
    return BufferOffset();
  }

  l->use(ret.getOffset());
  return ret;
}
// Emit msr, moving |r| into the status register.
// Hardcode the 'mask' field to 0b11 for now. It is bits 18 and 19, which
// are the two high bits of the 'c' in this constant.
BufferOffset Assembler::as_msr(Register r, Condition c) {
  MOZ_ASSERT((r.code() & ~0xf) == 0);
  const uint32_t opcode = 0x012cf000;
  return writeInst(opcode | int(c) | r.code());
}
// Unityped variants: all registers hold the same (ieee754 single/double) // notably not included are vcvt; vmov vd, #imm; vmov rt, vn.
// Emit a unityped VFP arithmetic instruction: all operands hold the same
// kind (single or double). Notably not covered here: vcvt, vmov vd, #imm,
// and vmov rt, vn.
BufferOffset Assembler::as_vfp_float(VFPRegister vd, VFPRegister vn,
                                     VFPRegister vm, VFPOp op, Condition c) {
  // Make sure we believe that all of our operands are the same kind.
  MOZ_ASSERT_IF(!vn.isMissing(), vd.equiv(vn));
  MOZ_ASSERT_IF(!vm.isMissing(), vd.equiv(vm));
  const vfp_size sz = vd.isDouble() ? IsDouble : IsSingle;
  return writeVFPInst(sz, VD(vd) | VN(vn) | VM(vm) | op | VfpArith | c);
}
// Specifically, a move between two same sized-registers.
BufferOffset Assembler::as_vmov(VFPRegister vd, VFPRegister vsrc, Condition c) {
  // Encoded as a VFP arithmetic op with no vn operand.
  return as_vfp_float(vd, NoVFPRegister, vsrc, OpvMov, c);
}
// Transfer between Core and VFP.
// Unlike the next function, moving between the core registers and vfp registers // can't be *that* properly typed. Namely, since I don't want to munge the type // VFPRegister to also include core registers. Thus, the core and vfp registers // are passed in based on their type, and src/dest is determined by the // float2core.
// NOTE(review): this function is cut off at the end of this copy (the text
// stops mid-body); do not treat the lines below as complete — consult the
// full file.
BufferOffset Assembler::as_vxfer(Register vt1, Register vt2, VFPRegister vm,
                                 FloatToCore_ f2c, Condition c, int idx) {
vfp_size sz = IsSingle; if (vm.isDouble()) { // Technically, this can be done with a vmov à la ARM ARM under vmov // however, that requires at least an extra bit saying if the operation // should be performed on the lower or upper half of the double. Moving // a single to/from 2N/2N+1 isn't equivalent, since there are 32 single // registers, and 32 double registers so there is no way to encode the // last 16 double registers.
sz = IsDouble;
MOZ_ASSERT(idx == 0 || idx == 1); // If we are transferring a single half of the double then it must be // moving a VFP reg to a core reg.
--> --------------------
--> maximum size reached
--> --------------------
Messung V0.5
¤ Dauer der Verarbeitung: 0.24 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.