/* * Copyright (c) 1997, 2022, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2015 SAP SE. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. *
*/
// Byte used to pad gaps in the code buffer. A word of zero bytes
// (0x00000000) decodes as an illegal PPC instruction, so straying
// into fill area traps immediately instead of executing garbage.
int AbstractAssembler::code_fill_byte() {
  return 0x00; // illegal instruction 0x00000000
}
// Patch instruction `inst' at offset `inst_pos' to refer to // `dest_pos' and return the resulting instruction. We should have // pcs, not offsets, but since all is relative, it will work out fine. int Assembler::patched_branch(int dest_pos, int inst, int inst_pos) { int m = 0; // mask for displacement field int v = 0; // new value for displacement field
switch (inv_op_ppc(inst)) { case b_op: m = li(-1); v = li(disp(dest_pos, inst_pos)); break; case bc_op: m = bd(-1); v = bd(disp(dest_pos, inst_pos)); break; default: ShouldNotReachHere();
} return inst & ~m | v;
}
// Return the offset, relative to _code_begin, of the destination of // the branch inst at offset pos. int Assembler::branch_destination(int inst, int pos) { int r = 0; switch (inv_op_ppc(inst)) { case b_op: r = bxx_destination_offset(inst, pos); break; case bc_op: r = inv_bd_field(inst, pos); break; default: ShouldNotReachHere();
} return r;
}
// Load a 64 bit constant. Patchable. void Assembler::load_const(Register d, long x, Register tmp) { // 64-bit value: x = xa xb xc xd int xa = (x >> 48) & 0xffff; int xb = (x >> 32) & 0xffff; int xc = (x >> 16) & 0xffff; int xd = (x >> 0) & 0xffff; if (tmp == noreg) {
Assembler::lis( d, (int)(short)xa);
Assembler::ori( d, d, (unsignedint)xb);
Assembler::sldi(d, d, 32);
Assembler::oris(d, d, (unsignedint)xc);
Assembler::ori( d, d, (unsignedint)xd);
} else { // exploit instruction level parallelism if we have a tmp register
assert_different_registers(d, tmp);
Assembler::lis(tmp, (int)(short)xa);
Assembler::lis(d, (int)(short)xc);
Assembler::ori(tmp, tmp, (unsignedint)xb);
Assembler::ori(d, d, (unsignedint)xd);
Assembler::insrdi(d, tmp, 32, 0);
}
}
// Load a 64 bit constant, optimized, not identifiable. // Tmp can be used to increase ILP. Set return_simm16_rest=true to get a // 16 bit immediate offset. int Assembler::load_const_optimized(Register d, long x, Register tmp, bool return_simm16_rest) { // Avoid accidentally trying to use R0 for indexed addressing.
assert_different_registers(d, tmp);
short xa, xb, xc, xd; // Four 16-bit chunks of const. long rem = x; // Remaining part of const.
xd = rem & 0xFFFF; // Lowest 16-bit chunk.
rem = (rem >> 16) + ((unsignedshort)xd >> 15); // Compensation for sign extend.
int retval = 0; if (return_simm16_rest) {
retval = xd;
x = rem << 16;
xd = 0;
}
if (d == R0) { // Can't use addi. if (is_simm(x, 32)) { // opt 2: simm32
lis(d, x >> 16); if (xd) ori(d, d, (unsignedshort)xd);
} else { // 64-bit value: x = xa xb xc xd
xa = (x >> 48) & 0xffff;
xb = (x >> 32) & 0xffff;
xc = (x >> 16) & 0xffff; bool xa_loaded = (xb & 0x8000) ? (xa != -1) : (xa != 0); if (tmp == noreg || (xc == 0 && xd == 0)) { if (xa_loaded) {
lis(d, xa); if (xb) { ori(d, d, (unsignedshort)xb); }
} else {
li(d, xb);
}
sldi(d, d, 32); if (xc) { oris(d, d, (unsignedshort)xc); } if (xd) { ori( d, d, (unsignedshort)xd); }
} else { // Exploit instruction level parallelism if we have a tmp register. bool xc_loaded = (xd & 0x8000) ? (xc != -1) : (xc != 0); if (xa_loaded) {
lis(tmp, xa);
} if (xc_loaded) {
lis(d, xc);
} if (xa_loaded) { if (xb) { ori(tmp, tmp, (unsignedshort)xb); }
} else {
li(tmp, xb);
} if (xc_loaded) { if (xd) { ori(d, d, (unsignedshort)xd); }
} else {
li(d, xd);
}
insrdi(d, tmp, 32, 0);
}
} return retval;
}
xc = rem & 0xFFFF; // Next 16-bit chunk.
rem = (rem >> 16) + ((unsignedshort)xc >> 15); // Compensation for sign extend.
if (rem == 0) { // opt 2: simm32
lis(d, xc);
} else { // High 32 bits needed.
if (tmp != noreg && (int)x != 0) { // opt 3: We have a temp reg. // No carry propagation between xc and higher chunks here (use logical instructions).
xa = (x >> 48) & 0xffff;
xb = (x >> 32) & 0xffff; // No sign compensation, we use lis+ori or li to allow usage of R0. bool xa_loaded = (xb & 0x8000) ? (xa != -1) : (xa != 0); bool return_xd = false;
if (xa_loaded) { lis(tmp, xa); } if (xc) { lis(d, xc); } if (xa_loaded) { if (xb) { ori(tmp, tmp, (unsignedshort)xb); } // No addi, we support tmp == R0.
} else {
li(tmp, xb);
} if (xc) { if (xd) { addi(d, d, xd); }
} else {
li(d, xd);
}
insrdi(d, tmp, 32, 0); return retval;
}
xb = rem & 0xFFFF; // Next 16-bit chunk.
rem = (rem >> 16) + ((unsignedshort)xb >> 15); // Compensation for sign extend.
// We emit only one addition to s to optimize latency. int Assembler::add_const_optimized(Register d, Register s, long x, Register tmp, bool return_simm16_rest) {
assert(s != R0 && s != tmp, "unsupported"); long rem = x;
// Case 1: Can use mr or addi. short xd = rem & 0xFFFF; // Lowest 16-bit chunk.
rem = (rem >> 16) + ((unsignedshort)xd >> 15); if (rem == 0) { if (xd == 0) { if (d != s) { mr(d, s); } return 0;
} if (return_simm16_rest && (d == s)) { return xd;
}
addi(d, s, xd); return 0;
}
// Case 2: Can use addis. if (xd == 0) { short xc = rem & 0xFFFF; // 2nd 16-bit chunk.
rem = (rem >> 16) + ((unsignedshort)xc >> 15); if (rem == 0) {
addis(d, s, xc); return 0;
}
}
// Other cases: load & add. Register tmp1 = tmp,
tmp2 = noreg; if ((d != tmp) && (d != s)) { // Can use d.
tmp1 = d;
tmp2 = tmp;
} int simm16_rest = load_const_optimized(tmp1, x, tmp2, return_simm16_rest);
add(d, tmp1, s); return simm16_rest;
}
// NOTE(review): the following text is extraction residue from a German web page
// (a syntax-highlighting site's disclaimer), not part of the original source.
// Translation: "The information on this website has been carefully compiled to
// the best of our knowledge. However, neither completeness, correctness, nor
// quality of the provided information is guaranteed. Note: the colored syntax
// display is still experimental." — Remove this residue from the file.