/** * DOC: batch buffer command parser * * Motivation: * Certain OpenGL features (e.g. transform feedback, performance monitoring) * require userspace code to submit batches containing commands such as * MI_LOAD_REGISTER_IMM to access various registers. Unfortunately, some * generations of the hardware will noop these commands in "unsecure" batches * (which includes all userspace batches submitted via i915) even though the * commands may be safe and represent the intended programming model of the * device. * * The software command parser is similar in operation to the command parsing * done in hardware for unsecure batches. However, the software parser allows * some operations that would be noop'd by hardware, if the parser determines * the operation is safe, and submits the batch as "secure" to prevent hardware * parsing. * * Threats: * At a high level, the hardware (and software) checks attempt to prevent * granting userspace undue privileges. There are three categories of privilege. * * First, commands which are explicitly defined as privileged or which should * only be used by the kernel driver. The parser rejects such commands * * Second, commands which access registers. To support correct/enhanced * userspace functionality, particularly certain OpenGL extensions, the parser * provides a whitelist of registers which userspace may safely access * * Third, commands which access privileged memory (i.e. GGTT, HWS page, etc). * The parser always rejects such commands. * * The majority of the problematic commands fall in the MI_* range, with only a * few specific commands on each engine (e.g. PIPE_CONTROL and MI_FLUSH_DW). * * Implementation: * Each engine maintains tables of commands and registers which the parser * uses in scanning batch buffers submitted to that engine. 
* * Since the set of commands that the parser must check for is significantly * smaller than the number of commands supported, the parser tables contain only * those commands required by the parser. This generally works because command * opcode ranges have standard command length encodings. So for commands that * the parser does not need to check, it can easily skip them. This is * implemented via a per-engine length decoding vfunc. * * Unfortunately, there are a number of commands that do not follow the standard * length encoding for their opcode range, primarily amongst the MI_* commands. * To handle this, the parser provides a way to define explicit "skip" entries * in the per-engine command tables. * * Other command table entries map fairly directly to high level categories * mentioned above: rejected, register whitelist. The parser implements a number * of checks, including the privileged memory checks, via a general bitmasking * mechanism.
*/
/*
 * A command that requires special handling by the command parser.
 */
struct drm_i915_cmd_descriptor {
	/*
	 * Flags describing how the command parser processes the command.
	 *
	 * CMD_DESC_FIXED: The command has a fixed length if this is set,
	 *                 a length mask if not set
	 * CMD_DESC_SKIP: The command is allowed but does not follow the
	 *                standard length encoding for the opcode range in
	 *                which it falls
	 * CMD_DESC_REJECT: The command is never allowed
	 * CMD_DESC_REGISTER: The command should be checked against the
	 *                    register whitelist for the appropriate ring
	 * CMD_DESC_BITMASK: The command is checked using the masked-compare
	 *                   rules in the bits[] array below
	 */
	u32 flags;
#define CMD_DESC_FIXED    (1<<0)
#define CMD_DESC_SKIP     (1<<1)
#define CMD_DESC_REJECT   (1<<2)
#define CMD_DESC_REGISTER (1<<3)
#define CMD_DESC_BITMASK  (1<<4)

	/*
	 * The command's unique identification bits and the bitmask to get them.
	 * This isn't strictly the opcode field as defined in the spec and may
	 * also include type, subtype, and/or subop fields.
	 */
	struct {
		u32 value;
		u32 mask;
	} cmd;

	/*
	 * The command's length. The command is either fixed length (i.e. does
	 * not include a length field) or has a length field mask. The flag
	 * CMD_DESC_FIXED indicates a fixed length. Otherwise, the command has
	 * a length mask. All command entries in a command table must include
	 * length information.
	 */
	union {
		u32 fixed;
		u32 mask;
	} length;

	/*
	 * Describes where to find a register address in the command to check
	 * against the ring's register whitelist. Only valid if flags has the
	 * CMD_DESC_REGISTER bit set.
	 *
	 * A non-zero step value implies that the command may access multiple
	 * registers in sequence (e.g. LRI), in that case step gives the
	 * distance in dwords between individual offset fields.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 step;
	} reg;

#define MAX_CMD_DESC_BITMASKS 3
	/*
	 * Describes command checks where a particular dword is masked and
	 * compared against an expected value. If the command does not match
	 * the expected value, the parser rejects it. Only valid if flags has
	 * the CMD_DESC_BITMASK bit set. Only entries where mask is non-zero
	 * are valid.
	 *
	 * If the check specifies a non-zero condition_mask then the parser
	 * only performs the check when the bits specified by condition_mask
	 * are non-zero.
	 */
	struct {
		u32 offset;
		u32 mask;
		u32 expected;
		u32 condition_offset;
		u32 condition_mask;
	} bits[MAX_CMD_DESC_BITMASKS];
};
/*
 * A table of commands requiring special handling by the command parser.
 *
 * Each engine has an array of tables. Each table consists of an array of
 * command descriptors, which must be sorted with command opcodes in
 * ascending order.
 */
struct drm_i915_cmd_table {
	/* Fix: "conststruct" was a fused keyword pair and did not compile. */
	const struct drm_i915_cmd_descriptor *table;
	int count;
};
/* Convenience macros to compress the tables */
/* Opcode shift per client class (MI / 3D / 2D / MFX) */
#define SMI STD_MI_OPCODE_SHIFT
#define S3D STD_3D_OPCODE_SHIFT
#define S2D STD_2D_OPCODE_SHIFT
#define SMFX STD_MFX_OPCODE_SHIFT
/* F = fixed-length command; !F in a table entry means length-mask encoded */
#define F true
/* Single-letter actions mapping onto the CMD_DESC_* flags */
#define S CMD_DESC_SKIP
#define R CMD_DESC_REJECT
#define W CMD_DESC_REGISTER
#define B CMD_DESC_BITMASK
/* Command Mask Fixed Len Action
---------------------------------------------------------- */ staticconststruct drm_i915_cmd_descriptor gen7_common_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, R ),
CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, R ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
CMD( MI_SEMAPHORE_MBOX, SMI, !F, 0xFF, R ),
CMD( MI_STORE_DWORD_INDEX, SMI, !F, 0xFF, R ),
CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
CMD( MI_STORE_REGISTER_MEM, SMI, F, 3, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ),
CMD( MI_LOAD_REGISTER_MEM, SMI, F, 3, W | B,
.reg = { .offset = 1, .mask = 0x007FFFFC },
.bits = {{
.offset = 0,
.mask = MI_GLOBAL_GTT,
.expected = 0,
}}, ), /* * MI_BATCH_BUFFER_START requires some special handling. It's not * really a 'skip' action but it doesn't seem like it's worth adding * a new action. See intel_engine_cmd_parser().
*/
CMD( MI_BATCH_BUFFER_START, SMI, !F, 0xFF, S ),
};
staticconststruct drm_i915_cmd_descriptor hsw_blt_cmds[] = {
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, R ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, R ),
};
/* * For Gen9 we can still rely on the h/w to enforce cmd security, and only * need to re-enforce the register access checks. We therefore only need to * teach the cmdparser how to find the end of each command, and identify * register accesses. The table doesn't need to reject any commands, and so * the only commands listed here are: * 1) Those that touch registers * 2) Those that do not have the default 8-bit length * * Note that the default MI length mask chosen for this table is 0xFF, not * the 0x3F used on older devices. This is because the vast majority of MI * cmds on Gen9 use a standard 8-bit Length field. * All the Gen9 blitter instructions are standard 0xFF length mask, and * none allow access to non-general registers, so in fact no BLT cmds are * included in the table at all. *
*/ staticconststruct drm_i915_cmd_descriptor gen9_blt_cmds[] = {
CMD( MI_NOOP, SMI, F, 1, S ),
CMD( MI_USER_INTERRUPT, SMI, F, 1, S ),
CMD( MI_WAIT_FOR_EVENT, SMI, F, 1, S ),
CMD( MI_FLUSH, SMI, F, 1, S ),
CMD( MI_ARB_CHECK, SMI, F, 1, S ),
CMD( MI_REPORT_HEAD, SMI, F, 1, S ),
CMD( MI_ARB_ON_OFF, SMI, F, 1, S ),
CMD( MI_SUSPEND_FLUSH, SMI, F, 1, S ),
CMD( MI_LOAD_SCAN_LINES_INCL, SMI, !F, 0x3F, S ),
CMD( MI_LOAD_SCAN_LINES_EXCL, SMI, !F, 0x3F, S ),
CMD( MI_STORE_DWORD_IMM, SMI, !F, 0x3FF, S ),
CMD( MI_LOAD_REGISTER_IMM(1), SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 2 } ),
CMD( MI_UPDATE_GTT, SMI, !F, 0x3FF, S ),
CMD( MI_STORE_REGISTER_MEM_GEN8, SMI, F, 4, W,
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
CMD( MI_FLUSH_DW, SMI, !F, 0x3F, S ),
CMD( MI_LOAD_REGISTER_MEM_GEN8, SMI, F, 4, W,
.reg = { .offset = 1, .mask = 0x007FFFFC } ),
CMD( MI_LOAD_REGISTER_REG, SMI, !F, 0xFF, W,
.reg = { .offset = 1, .mask = 0x007FFFFC, .step = 1 } ),
/* * We allow BB_START but apply further checks. We just sanitize the * basic fields here.
*/ #define MI_BB_START_OPERAND_MASK GENMASK(SMI-1, 0) #define MI_BB_START_OPERAND_EXPECT (MI_BATCH_PPGTT_HSW | 1)
CMD( MI_BATCH_BUFFER_START_GEN8, SMI, !F, 0xFF, B,
.bits = {{
.offset = 0,
.mask = MI_BB_START_OPERAND_MASK,
.expected = MI_BB_START_OPERAND_EXPECT,
}}, ),
};
/*
 * Register whitelists, sorted by increasing register offset.
 */

/*
 * An individual whitelist entry granting access to register addr. If
 * mask is non-zero the argument of immediate register writes will be
 * AND-ed with mask, and the command will be rejected if the result
 * doesn't match value.
 *
 * Registers with non-zero mask are only allowed to be written using
 * LRI.
 */
struct drm_i915_reg_descriptor {
	i915_reg_t addr;	/* MMIO offset of the whitelisted register */
	u32 mask;		/* 0 = any write allowed; else constrains LRI values */
	u32 value;		/* expected result of (written_value & mask) */
};
/*
 * Convenience macro for adding 64-bit registers.
 *
 * Some registers that userspace accesses are 64 bits. The register
 * access commands only allow 32-bit accesses. Hence, we have to include
 * entries for both halves of the 64-bit registers.
 */
/* Expands to two table entries: the low dword and the _UDW upper dword. */
#define REG64(_reg) \
	{ .addr = _reg }, \
	{ .addr = _reg ## _UDW }
if (curr < previous) {
drm_err(&engine->i915->drm, "CMD: %s [%d] command table not sorted: " "table=%d entry=%d cmd=0x%08X prev=0x%08X\n",
engine->name, engine->id,
i, j, curr, previous);
ret = false;
}
previous = curr;
}
}
return ret;
}
staticbool check_sorted(conststruct intel_engine_cs *engine, conststruct drm_i915_reg_descriptor *reg_table, int reg_count)
{ int i;
u32 previous = 0; bool ret = true;
for (i = 0; i < reg_count; i++) {
u32 curr = i915_mmio_reg_offset(reg_table[i].addr);
if (curr < previous) {
drm_err(&engine->i915->drm, "CMD: %s [%d] register table not sorted: " "entry=%d reg=0x%08X prev=0x%08X\n",
engine->name, engine->id,
i, curr, previous);
ret = false;
}
previous = curr;
}
return ret;
}
staticbool validate_regs_sorted(struct intel_engine_cs *engine)
{ int i; conststruct drm_i915_reg_table *table;
for (i = 0; i < engine->reg_table_count; i++) {
table = &engine->reg_tables[i]; if (!check_sorted(engine, table->regs, table->num_regs)) returnfalse;
}
/* * Different command ranges have different numbers of bits for the opcode. For * example, MI commands use bits 31:23 while 3D commands use bits 31:16. The * problem is that, for example, MI commands use bits 22:16 for other fields * such as GGTT vs PPGTT bits. If we include those bits in the mask then when * we mask a command from a batch it could hash to the wrong bucket due to * non-opcode bits being set. But if we don't include those bits, some 3D * commands may hash to the same bucket due to not including opcode bits that * make the command unique. For now, we will risk hashing to the same bucket.
*/ staticinline u32 cmd_header_key(u32 x)
{ switch (x >> INSTR_CLIENT_SHIFT) { default: case INSTR_MI_CLIENT: return x >> STD_MI_OPCODE_SHIFT; case INSTR_RC_CLIENT: return x >> STD_3D_OPCODE_SHIFT; case INSTR_BC_CLIENT: return x >> STD_2D_OPCODE_SHIFT;
}
}
staticint init_hash_table(struct intel_engine_cs *engine, conststruct drm_i915_cmd_table *cmd_tables, int cmd_table_count)
{ int i, j;
hash_init(engine->cmd_hash);
for (i = 0; i < cmd_table_count; i++) { conststruct drm_i915_cmd_table *table = &cmd_tables[i];
hash_for_each_safe(engine->cmd_hash, i, tmp, desc_node, node) {
hash_del(&desc_node->node);
kfree(desc_node);
}
}
/** * intel_engine_init_cmd_parser() - set cmd parser related fields for an engine * @engine: the engine to initialize * * Optionally initializes fields related to batch buffer command parsing in the * struct intel_engine_cs based on whether the platform requires software * command parsing.
*/ int intel_engine_init_cmd_parser(struct intel_engine_cs *engine)
{ conststruct drm_i915_cmd_table *cmd_tables; int cmd_table_count; int ret;
if (GRAPHICS_VER(engine->i915) == 9) {
engine->reg_tables = gen9_blt_reg_tables;
engine->reg_table_count =
ARRAY_SIZE(gen9_blt_reg_tables);
} elseif (IS_HASWELL(engine->i915)) {
engine->reg_tables = hsw_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(hsw_blt_reg_tables);
} else {
engine->reg_tables = ivb_blt_reg_tables;
engine->reg_table_count = ARRAY_SIZE(ivb_blt_reg_tables);
} break; case VIDEO_ENHANCEMENT_CLASS:
cmd_tables = hsw_vebox_cmd_table;
cmd_table_count = ARRAY_SIZE(hsw_vebox_cmd_table); /* VECS can use the same length_mask function as VCS */
engine->get_cmd_length_mask = gen7_bsd_get_cmd_length_mask; break; default:
MISSING_CASE(engine->class); goto out;
}
if (!validate_cmds_sorted(engine, cmd_tables, cmd_table_count)) {
drm_err(&engine->i915->drm, "%s: command descriptions are not sorted\n",
engine->name); goto out;
} if (!validate_regs_sorted(engine)) {
drm_err(&engine->i915->drm, "%s: registers are not sorted\n", engine->name); goto out;
}
ret = init_hash_table(engine, cmd_tables, cmd_table_count); if (ret) {
drm_err(&engine->i915->drm, "%s: initialised failed!\n", engine->name);
fini_hash_table(engine); goto out;
}
engine->flags |= I915_ENGINE_USING_CMD_PARSER;
out: if (intel_engine_requires_cmd_parser(engine) &&
!intel_engine_using_cmd_parser(engine)) return -EINVAL;
return 0;
}
/** * intel_engine_cleanup_cmd_parser() - clean up cmd parser related fields * @engine: the engine to clean up * * Releases any resources related to command parsing that may have been * initialized for the specified engine.
*/ void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine)
{ if (!intel_engine_using_cmd_parser(engine)) return;
/* * Returns a pointer to a descriptor for the command specified by cmd_header. * * The caller must supply space for a default descriptor via the default_desc * parameter. If no descriptor for the specified command exists in the engine's * command parser tables, this function fills in default_desc based on the * engine's default length encoding and returns default_desc.
*/ staticconststruct drm_i915_cmd_descriptor*
find_cmd(struct intel_engine_cs *engine,
u32 cmd_header, conststruct drm_i915_cmd_descriptor *desc, struct drm_i915_cmd_descriptor *default_desc)
{
u32 mask;
if (((cmd_header ^ desc->cmd.value) & desc->cmd.mask) == 0) return desc;
desc = find_cmd_in_table(engine, cmd_header); if (desc) return desc;
mask = engine->get_cmd_length_mask(cmd_header); if (!mask) return NULL;
/* Returns a vmap'd pointer to dst_obj, which the caller must unmap */ static u32 *copy_batch(struct drm_i915_gem_object *dst_obj, struct drm_i915_gem_object *src_obj, unsignedlong offset, unsignedlong length, bool *needs_clflush_after)
{ unsignedint src_needs_clflush; unsignedint dst_needs_clflush; void *dst, *src; int ret;
ret = i915_gem_object_prepare_write(dst_obj, &dst_needs_clflush); if (ret) return ERR_PTR(ret);
dst = i915_gem_object_pin_map(dst_obj, I915_MAP_WB);
i915_gem_object_finish_access(dst_obj); if (IS_ERR(dst)) return dst;
ret = i915_gem_object_prepare_read(src_obj, &src_needs_clflush); if (ret) {
i915_gem_object_unpin_map(dst_obj); return ERR_PTR(ret);
}
src = ERR_PTR(-ENODEV); if (src_needs_clflush && i915_has_memcpy_from_wc()) {
src = i915_gem_object_pin_map(src_obj, I915_MAP_WC); if (!IS_ERR(src)) {
i915_unaligned_memcpy_from_wc(dst,
src + offset,
length);
i915_gem_object_unpin_map(src_obj);
}
} if (IS_ERR(src)) { unsignedlong x, n, remain; void *ptr;
/* * We can avoid clflushing partial cachelines before the write * if we only every write full cache-lines. Since we know that * both the source and destination are in multiples of * PAGE_SIZE, we can simply round up to the next cacheline. * We don't care about copying too much here as we only * validate up to the end of the batch.
*/
remain = length; if (dst_needs_clflush & CLFLUSH_BEFORE)
remain = round_up(remain,
boot_cpu_data.x86_clflush_size);
ptr = dst;
x = offset_in_page(offset); for (n = offset >> PAGE_SHIFT; remain; n++) { int len = min(remain, PAGE_SIZE - x);
if (desc->flags & CMD_DESC_REGISTER) { /* * Get the distance between individual register offset * fields if the command can perform more than one * access at a time.
*/ const u32 step = desc->reg.step ? desc->reg.step : length;
u32 offset;
if (!reg) {
DRM_DEBUG("CMD: Rejected register 0x%08X in command: 0x%08X (%s)\n",
reg_addr, *cmd, engine->name); returnfalse;
}
/* * Check the value written to the register against the * allowed mask/value pair given in the whitelist entry.
*/ if (reg->mask) { if (cmd_desc_is(desc, MI_LOAD_REGISTER_MEM)) {
DRM_DEBUG("CMD: Rejected LRM to masked register 0x%08X\n",
reg_addr); returnfalse;
}
if (cmd_desc_is(desc, MI_LOAD_REGISTER_REG)) {
DRM_DEBUG("CMD: Rejected LRR to masked register 0x%08X\n",
reg_addr); returnfalse;
}
/* For igt compatibility on older platforms */ if (!jump_whitelist) {
DRM_DEBUG("CMD: Rejecting BB_START for ggtt based submission\n"); return -EACCES;
}
if (length != 3) {
DRM_DEBUG("CMD: Recursive BB_START with bad length(%u)\n",
length); return -EINVAL;
}
/* * Any underflow of jump_target is guaranteed to be outside the range * of a u32, so >= test catches both too large and too small
*/ if (jump_offset >= batch_length) {
DRM_DEBUG("CMD: BB_START to 0x%llx jumps out of BB\n",
jump_target); return -EINVAL;
}
/* * This cannot overflow a u32 because we already checked jump_offset * is within the BB, and the batch_length is a u32
*/
target_cmd_offset = lower_32_bits(jump_offset);
target_cmd_index = target_cmd_offset / sizeof(u32);
if (IS_ERR(jump_whitelist)) return PTR_ERR(jump_whitelist);
if (!test_bit(target_cmd_index, jump_whitelist)) {
DRM_DEBUG("CMD: BB_START to 0x%llx not a previously executed cmd\n",
jump_target); return -EINVAL;
}
/* * We expect batch_length to be less than 256KiB for known users, * i.e. we need at most an 8KiB bitmap allocation which should be * reasonably cheap due to kmalloc caches.
*/
/* Prefer to report transient allocation failure rather than hit oom */
jmp = bitmap_zalloc(DIV_ROUND_UP(batch_length, sizeof(u32)),
GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); if (!jmp) return ERR_PTR(-ENOMEM);
return jmp;
}
#define LENGTH_BIAS 2
/** * intel_engine_cmd_parser() - parse a batch buffer for privilege violations * @engine: the engine on which the batch is to execute * @batch: the batch buffer in question * @batch_offset: byte offset in the batch at which execution starts * @batch_length: length of the commands in batch_obj * @shadow: validated copy of the batch buffer in question * @trampoline: true if we need to trampoline into privileged execution * * Parses the specified batch buffer looking for privilege violations as * described in the overview. * * Return: non-zero if the parser finds violations or otherwise fails; -EACCES * if the batch appears legal but should use hardware parsing
*/
/* * We use the batch length as size because the shadow object is as * large or larger and copy_batch() will write MI_NOPs to the extra * space. Parsing should be faster in some cases this way.
*/
batch_end = cmd + batch_length / sizeof(*batch_end); do {
u32 length;
if (*cmd == MI_BATCH_BUFFER_END) break;
desc = find_cmd(engine, *cmd, desc, &default_desc); if (!desc) {
DRM_DEBUG("CMD: Unrecognized command: 0x%08X\n", *cmd);
ret = -EINVAL; break;
}
if (!check_cmd(engine, desc, cmd, length)) {
ret = -EACCES; break;
}
if (cmd_desc_is(desc, MI_BATCH_BUFFER_START)) {
ret = check_bbstart(cmd, offset, length, batch_length,
batch_addr, shadow_addr,
jump_whitelist); break;
}
if (!IS_ERR_OR_NULL(jump_whitelist))
__set_bit(offset, jump_whitelist);
cmd += length;
offset += length; if (cmd >= batch_end) {
DRM_DEBUG("CMD: Got to the end of the buffer w/o a BBE cmd!\n");
ret = -EINVAL; break;
}
} while (1);
if (trampoline) { /* * With the trampoline, the shadow is executed twice. * * 1 - starting at offset 0, in privileged mode * 2 - starting at offset batch_len, as non-privileged * * Only if the batch is valid and safe to execute, do we * allow the first privileged execution to proceed. If not, * we terminate the first batch and use the second batchbuffer * entry to chain to the original unsafe non-privileged batch, * leaving it to the HW to validate.
*/
*batch_end = MI_BATCH_BUFFER_END;
if (ret) { /* Batch unsafe to execute with privileges, cancel! */
cmd = page_mask_bits(shadow->obj->mm.mapping);
*cmd = MI_BATCH_BUFFER_END;
/* If batch is unsafe but valid, jump to the original */ if (ret == -EACCES) { unsignedint flags;
flags = MI_BATCH_NON_SECURE_I965; if (IS_HASWELL(engine->i915))
flags = MI_BATCH_NON_SECURE_HSW;
if (!IS_ERR_OR_NULL(jump_whitelist))
kfree(jump_whitelist);
i915_gem_object_unpin_map(shadow->obj); return ret;
}
/**
 * i915_cmd_parser_get_version() - get the cmd parser version number
 * @dev_priv: i915 device private
 *
 * The cmd parser maintains a simple increasing integer version number suitable
 * for passing to userspace clients to determine what operations are permitted.
 *
 * Return: the current version number of the cmd parser
 */
int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv)
{
	struct intel_engine_cs *engine;

	/*
	 * Command parser version history
	 *
	 * 1. Initial version. Checks batches and reports violations, but leaves
	 *    hardware parsing enabled (so does not allow new use cases).
	 * 2. Allow access to the MI_PREDICATE_SRC0 and
	 *    MI_PREDICATE_SRC1 registers.
	 * 3. Allow access to the GPGPU_THREADS_DISPATCHED register.
	 * 4. L3 atomic chicken bits of HSW_SCRATCH1 and HSW_ROW_CHICKEN3.
	 * 5. GPGPU dispatch compute indirect registers.
	 * 6. TIMESTAMP register and Haswell CS GPR registers
	 * 7. Allow MI_LOAD_REGISTER_REG between whitelisted registers.
	 * 8. Don't report cmd_check() failures as EINVAL errors to userspace;
	 *    rely on the HW to NOOP disallowed commands as it would without
	 *    the parser enabled.
	 * 9. Don't whitelist or handle oacontrol specially, as ownership
	 *    for oacontrol state is moving to i915-perf.
	 * 10. Support for Gen9 BCS Parsing
	 */
	for_each_uabi_engine(engine, dev_priv) {
		if (intel_engine_using_cmd_parser(engine))
			return 10;
	}

	/* Command parser not enabled on any engine: report 0 - unsupported */
	return 0;
}
Messung V0.5
¤ Dauer der Verarbeitung: 0.21 Sekunden
(vorverarbeitet)
¤
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit,
noch Qualität der bereit gestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.