Source-code library statistics page: products/sources/formal languages/C/Linux/arch/arm64/kernel/ (open-source operating system, version 6.17.9) — file dated 2025-10-24, size 140 kB.

Source: cpufeature.c — Language: C

 
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Contains CPU feature definitions
 *
 * Copyright (C) 2015 ARM Ltd.
 *
 * A note for the weary kernel hacker: the code here is confusing and hard to
 * follow! That's partly because it's solving a nasty problem, but also because
 * there's a little bit of over-abstraction that tends to obscure what's going
 * on behind a maze of helper functions and macros.
 *
 * The basic problem is that hardware folks have started gluing together CPUs
 * with distinct architectural features; in some cases even creating SoCs where
 * user-visible instructions are available only on a subset of the available
 * cores. We try to address this by snapshotting the feature registers of the
 * boot CPU and comparing these with the feature registers of each secondary
 * CPU when bringing them up. If there is a mismatch, then we update the
 * snapshot state to indicate the lowest-common denominator of the feature,
 * known as the "safe" value. This snapshot state can be queried to view the
 * "sanitised" value of a feature register.
 *
 * The sanitised register values are used to decide which capabilities we
 * have in the system. These may be in the form of traditional "hwcaps"
 * advertised to userspace or internal "cpucaps" which are used to configure
 * things like alternative patching and static keys. While a feature mismatch
 * may result in a TAINT_CPU_OUT_OF_SPEC kernel taint, a capability mismatch
 * may prevent a CPU from being onlined at all.
 *
 * Some implementation details worth remembering:
 *
 * - Mismatched features are *always* sanitised to a "safe" value, which
 *   usually indicates that the feature is not supported.
 *
 * - A mismatched feature marked with FTR_STRICT will cause a "SANITY CHECK"
 *   warning when onlining an offending CPU and the kernel will be tainted
 *   with TAINT_CPU_OUT_OF_SPEC.
 *
 * - Features marked as FTR_VISIBLE have their sanitised value visible to
 *   userspace. FTR_VISIBLE features in registers that are only visible
 *   to EL0 by trapping *must* have a corresponding HWCAP so that late
 *   onlining of CPUs cannot lead to features disappearing at runtime.
 *
 * - A "feature" is typically a 4-bit register field. A "capability" is the
 *   high-level description derived from the sanitised field value.
 *
 * - Read the Arm ARM (DDI 0487F.a) section D13.1.3 ("Principles of the ID
 *   scheme for fields in ID registers") to understand when feature fields
 *   may be signed or unsigned (FTR_SIGNED and FTR_UNSIGNED accordingly).
 *
 * - KVM exposes its own view of the feature registers to guest operating
 *   systems regardless of FTR_VISIBLE. This is typically driven from the
 *   sanitised register values to allow virtual CPUs to be migrated between
 *   arbitrary physical CPUs, but some features not present on the host are
 *   also advertised and emulated. Look at sys_reg_descs[] for the gory
 *   details.
 *
 * - If the arm64_ftr_bits[] for a register has a missing field, then this
 *   field is treated as STRICT RES0, including for read_sanitised_ftr_reg().
 *   This is stronger than FTR_HIDDEN and can be used to hide features from
 *   KVM guests.
 */


#define pr_fmt(fmt) "CPU features: " fmt

#include <linux/bsearch.h>
#include <linux/cpumask.h>
#include <linux/crash_dump.h>
#include <linux/kstrtox.h>
#include <linux/sort.h>
#include <linux/stop_machine.h>
#include <linux/sysfs.h>
#include <linux/types.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/kasan.h>
#include <linux/percpu.h>
#include <linux/sched/isolation.h>

#include <asm/cpu.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/fpsimd.h>
#include <asm/hwcap.h>
#include <asm/insn.h>
#include <asm/kvm_host.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/mte.h>
#include <asm/hypervisor.h>
#include <asm/processor.h>
#include <asm/smp.h>
#include <asm/sysreg.h>
#include <asm/traps.h>
#include <asm/vectors.h>
#include <asm/virt.h>

/* Kernel representation of AT_HWCAP and AT_HWCAP2 */
static DECLARE_BITMAP(elf_hwcap, MAX_CPU_FEATURES) __read_mostly;

#ifdef CONFIG_COMPAT
/*
 * Baseline hwcaps reported to 32-bit (AArch32) EL0 tasks; additional
 * compat hwcaps are detected at runtime and OR-ed in elsewhere.
 */
#define COMPAT_ELF_HWCAP_DEFAULT \
    (COMPAT_HWCAP_HALF|COMPAT_HWCAP_THUMB|\
     COMPAT_HWCAP_FAST_MULT|COMPAT_HWCAP_EDSP|\
     COMPAT_HWCAP_TLS|COMPAT_HWCAP_IDIV|\
     COMPAT_HWCAP_LPAE)
unsigned int compat_elf_hwcap __read_mostly = COMPAT_ELF_HWCAP_DEFAULT;
unsigned int compat_elf_hwcap2 __read_mostly;
unsigned int compat_elf_hwcap3 __read_mostly;
#endif

/* Capabilities detected across the whole system (bit per ARM64_* cpucap). */
DECLARE_BITMAP(system_cpucaps, ARM64_NCAPS);
EXPORT_SYMBOL(system_cpucaps);
/* Per-capability descriptor pointers, indexed by capability number. */
static struct arm64_cpu_capabilities const __ro_after_init *cpucap_ptrs[ARM64_NCAPS];

/* Capabilities detected on the boot CPU alone. */
DECLARE_BITMAP(boot_cpucaps, ARM64_NCAPS);

/*
 * arm64_use_ng_mappings must be placed in the .data section, otherwise it
 * ends up in the .bss section where it is initialized in early_map_kernel()
 * after the MMU (with the idmap) was enabled. create_init_idmap() - which
 * runs before early_map_kernel() and reads the variable via PTE_MAYBE_NG -
 * may end up generating an incorrect idmap page table attributes.
 */
bool arm64_use_ng_mappings __read_mostly = false;
EXPORT_SYMBOL(arm64_use_ng_mappings);

/*
 * Per-CPU exception vector base, initially the default "vectors".
 * NOTE(review): presumably switched per CPU by mitigation code — see
 * asm/vectors.h users to confirm.
 */
DEFINE_PER_CPU_READ_MOSTLY(const char *, this_cpu_vector) = vectors;

/*
 * Permit PER_LINUX32 and execve() of 32-bit binaries even if not all CPUs
 * support it?
 */
static bool __read_mostly allow_mismatched_32bit_el0;

/*
 * Static branch enabled only if allow_mismatched_32bit_el0 is set and we have
 * seen at least one CPU capable of 32-bit EL0.
 */
DEFINE_STATIC_KEY_FALSE(arm64_mismatched_32bit_el0);

/*
 * Mask of CPUs supporting 32-bit EL0.
 * Only valid if arm64_mismatched_32bit_el0 is enabled.
 */
static cpumask_var_t cpu_32bit_el0_mask __cpumask_var_read_mostly;

/* Emit the system-wide cpucaps bitmap at emergency log level. */
void dump_cpu_features(void)
{
 /* file-wide pr_fmt adds "CPU features: " prefix */
 pr_emerg("0x%*pb\n", ARM64_NCAPS, &system_cpucaps);
}

/* Largest positive value a (possibly signed) ID register field can hold. */
#define __ARM64_MAX_POSITIVE(reg, field)    \
  ((reg##_##field##_SIGNED ?    \
    BIT(reg##_##field##_WIDTH - 1) :   \
    BIT(reg##_##field##_WIDTH)) - 1)

/* Field bit pattern of the most negative value of a signed field. */
#define __ARM64_MIN_NEGATIVE(reg, field)  BIT(reg##_##field##_WIDTH - 1)

/*
 * Common initialiser for the register/field matching members of an
 * arm64_cpu_capabilities entry; used via the two wrappers below.
 */
#define __ARM64_CPUID_FIELDS(reg, field, min_value, max_value)  \
  .sys_reg = SYS_##reg,     \
  .field_pos = reg##_##field##_SHIFT,   \
  .field_width = reg##_##field##_WIDTH,   \
  .sign = reg##_##field##_SIGNED,    \
  .min_field_value = min_value,    \
  .max_field_value = max_value,

/*
 * ARM64_CPUID_FIELDS() encodes a field with a range from min_value to
 * an implicit maximum that depends on the signedness of the field.
 *
 * An unsigned field will be capped at all ones, while a signed field
 * will be limited to the positive half only.
 */
#define ARM64_CPUID_FIELDS(reg, field, min_value)   \
 __ARM64_CPUID_FIELDS(reg, field,    \
        SYS_FIELD_VALUE(reg, field, min_value), \
        __ARM64_MAX_POSITIVE(reg, field))

/*
 * ARM64_CPUID_FIELDS_NEG() encodes a field with a range from an
 * implicit minimal value to max_value. This should be used when
 * matching a non-implemented property.
 */
#define ARM64_CPUID_FIELDS_NEG(reg, field, max_value)   \
 __ARM64_CPUID_FIELDS(reg, field,    \
        __ARM64_MIN_NEGATIVE(reg, field),  \
        SYS_FIELD_VALUE(reg, field, max_value))

/*
 * Initialiser for one arm64_ftr_bits entry describing a single ID
 * register field (sign, userspace visibility, strictness, sanitisation
 * policy, position/width and the "safe" fallback value).
 */
#define __ARM64_FTR_BITS(SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 {      \
  .sign = SIGNED,    \
  .visible = VISIBLE,   \
  .strict = STRICT,   \
  .type = TYPE,    \
  .shift = SHIFT,    \
  .width = WIDTH,    \
  .safe_val = SAFE_VAL,   \
 }

/* Define a feature with unsigned values */
#define ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 __ARM64_FTR_BITS(FTR_UNSIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Define a feature with a signed value */
#define S_ARM64_FTR_BITS(VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL) \
 __ARM64_FTR_BITS(FTR_SIGNED, VISIBLE, STRICT, TYPE, SHIFT, WIDTH, SAFE_VAL)

/* Zero-width sentinel terminating an arm64_ftr_bits[] table. */
#define ARM64_FTR_END     \
 {      \
  .width = 0,    \
 }

/* Defined later in this file. */
static void cpu_enable_cnp(struct arm64_cpu_capabilities const *cap);

/* Defined later in this file. */
static bool __system_matches_cap(unsigned int n);

/*
 * NOTE: Any changes to the visibility of features should be kept in
 * sync with the documentation of the CPU feature register ABI.
 */

/*
 * ID_AA64ISAR0_EL1: instruction set attributes. Every field here is
 * 4 bits, unsigned, strict, and sanitised to the lowest value seen
 * across CPUs. Only TLB is hidden from the userspace feature-register
 * ABI.
 */
static const struct arm64_ftr_bits ftr_id_aa64isar0[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RNDR_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TLB_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_TS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_FHM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_DP_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM4_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SM3_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA3_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_RDM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_ATOMIC_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_CRC32_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA2_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_SHA1_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR0_EL1_AES_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * ID_AA64ISAR1_EL1. Pointer-authentication fields (GPI/GPA/API/APA) are
 * visible to userspace only when CONFIG_ARM64_PTR_AUTH is enabled;
 * API/APA additionally require an exact match across CPUs (FTR_EXACT).
 */
static const struct arm64_ftr_bits ftr_id_aa64isar1[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_XS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_I8MM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DGH_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_BF16_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SPECRES_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_SB_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FRINTTS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPI_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_GPA_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_LRCPC_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_FCMA_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_JSCVT_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
         FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_API_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
         FTR_STRICT, FTR_EXACT, ID_AA64ISAR1_EL1_APA_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR1_EL1_DPB_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * ID_AA64ISAR2_EL1. FTR_NONSTRICT fields are sanitised on mismatch
 * without tainting the kernel; APA3 (pointer auth) needs an exact match
 * like API/APA above.
 */
static const struct arm64_ftr_bits ftr_id_aa64isar2[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_LUT_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CSSC_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRFM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_CLRBHB_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_BC_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_MOPS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
         FTR_STRICT, FTR_EXACT, ID_AA64ISAR2_EL1_APA3_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_PTR_AUTH),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_GPA3_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_RPRES_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR2_EL1_WFxT_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* ID_AA64ISAR3_EL1: only FPRCVT and FAMINMAX are sanitised/exposed here. */
static const struct arm64_ftr_bits ftr_id_aa64isar3[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FPRCVT_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64ISAR3_EL1_FAMINMAX_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * ID_AA64PFR0_EL1: processor features. FP and AdvSIMD are signed fields
 * whose "safe" value is the not-implemented (NI) encoding; EL0/EL1 use
 * IMP as the safe floor. SVE visibility depends on CONFIG_ARM64_SVE.
 */
static const struct arm64_ftr_bits ftr_id_aa64pfr0[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV3_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_CSV2_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_DIT_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AMU_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_MPAM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SEL2_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
       FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_SVE_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_RAS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_GIC_SHIFT, 4, 0),
 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_AdvSIMD_SHIFT, 4, ID_AA64PFR0_EL1_AdvSIMD_NI),
 S_ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_FP_SHIFT, 4, ID_AA64PFR0_EL1_FP_NI),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL3_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL2_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL1_SHIFT, 4, ID_AA64PFR0_EL1_EL1_IMP),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR0_EL1_EL0_SHIFT, 4, ID_AA64PFR0_EL1_EL0_IMP),
 ARM64_FTR_END,
};

/*
 * ID_AA64PFR1_EL1. GCS/SME/MTE/BT visibility is gated on the matching
 * Kconfig option; MTE falls back to its NI encoding on mismatch.
 */
static const struct arm64_ftr_bits ftr_id_aa64pfr1[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_DF2_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_GCS),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_GCS_SHIFT, 4, 0),
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_frac_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SME_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MPAM_frac_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_RAS_frac_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_MTE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_MTE_SHIFT, 4, ID_AA64PFR1_EL1_MTE_NI),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_SSBS_SHIFT, 4, ID_AA64PFR1_EL1_SSBS_NI),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_BTI),
        FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR1_EL1_BT_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * ID_AA64PFR2_EL1. The MTE extensions use their NI encodings as the
 * safe fallback and do not taint on mismatch (FTR_NONSTRICT).
 */
static const struct arm64_ftr_bits ftr_id_aa64pfr2[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_FPMR_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_MTEFAR_SHIFT, 4, ID_AA64PFR2_EL1_MTEFAR_NI),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64PFR2_EL1_MTESTOREONLY_SHIFT, 4, ID_AA64PFR2_EL1_MTESTOREONLY_NI),
 ARM64_FTR_END,
};

/*
 * ID_AA64ZFR0_EL1: SVE sub-features. Every field is exposed to
 * userspace only when CONFIG_ARM64_SVE is enabled.
 */
static const struct arm64_ftr_bits ftr_id_aa64zfr0[] = {
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F64MM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F32MM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_F16MM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_I8MM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SM4_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SHA3_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_B16B16_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BF16_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_BitPerm_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_EltPerm_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_AES_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SVE),
         FTR_STRICT, FTR_LOWER_SAFE, ID_AA64ZFR0_EL1_SVEver_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * ID_AA64SMFR0_EL1: SME sub-features, gated on CONFIG_ARM64_SME. All
 * fields are FTR_EXACT; most are single-bit flags, while SMEver and the
 * I16I64/I16I32/I8I32 widths are 4-bit fields.
 */
static const struct arm64_ftr_bits ftr_id_aa64smfr0[] = {
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_FA64_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_LUTv2_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMEver_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I64_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F64F64_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I16I32_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16B16_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F16_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F8F16_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F8F32_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_I8I32_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F16F32_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_B16F32_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_BI32I32_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_F32F32_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8FMA_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8DP4_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SF8DP2_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SBitPerm_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_AES_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SFEXPA_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_STMOP_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_SME),
         FTR_STRICT, FTR_EXACT, ID_AA64SMFR0_EL1_SMOP4_SHIFT, 1, 0),
 ARM64_FTR_END,
};

/* ID_AA64FPFR0_EL1: FP8 single-bit flags, always visible, exact-match. */
static const struct arm64_ftr_bits ftr_id_aa64fpfr0[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8CVT_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8FMA_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8DP4_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8DP2_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8MM8_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8MM4_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8E4M3_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, ID_AA64FPFR0_EL1_F8E5M2_SHIFT, 1, 0),
 ARM64_FTR_END,
};

/* ID_AA64MMFR0_EL1: memory model features (granule sizes, PARange, ...). */
static const struct arm64_ftr_bits ftr_id_aa64mmfr0[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ECV_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_FGT_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_EXS_SHIFT, 4, 0),
 /*
  * Page size not being supported at Stage-2 is not fatal. You
  * just give up KVM if PAGE_SIZE isn't supported there. Go fix
  * your favourite nesting hypervisor.
  *
  * There is a small corner case where the hypervisor explicitly
  * advertises a given granule size at Stage-2 (value 2) on some
  * vCPUs, and uses the fallback to Stage-1 (value 0) for other
  * vCPUs. Although this is not forbidden by the architecture, it
  * indicates that the hypervisor is being silly (or buggy).
  *
  * We make no effort to cope with this and pretend that if these
  * fields are inconsistent across vCPUs, then it isn't worth
  * trying to bring KVM up.
  */
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN4_2_SHIFT, 4, 1),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN64_2_SHIFT, 4, 1),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64MMFR0_EL1_TGRAN16_2_SHIFT, 4, 1),
 /*
  * We already refuse to boot CPUs that don't support our configured
  * page size, so we can only detect mismatches for a page size other
  * than the one we're currently using. Unfortunately, SoCs like this
  * exist in the wild so, even though we don't like it, we'll have to go
  * along with it and treat them as non-strict.
  */
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN4_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN4_NI),
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN64_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN64_NI),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_TGRAN16_SHIFT, 4, ID_AA64MMFR0_EL1_TGRAN16_NI),

 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGENDEL0_SHIFT, 4, 0),
 /* Linux shouldn't care about secure memory */
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_SNSMEM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_BIGEND_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_ASIDBITS_SHIFT, 4, 0),
 /*
  * Differing PARange is fine as long as all peripherals and memory are mapped
  * within the minimum PARange of all CPUs
  */
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR0_EL1_PARANGE_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * ID_AA64MMFR1_EL1. Note SpecSEI uses FTR_HIGHER_SAFE: the higher value
 * (SError possible) is the conservative assumption on mismatch.
 */
static const struct arm64_ftr_bits ftr_id_aa64mmfr1[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ECBHB_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TIDCP1_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_AFP_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HCX_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_ETS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_TWED_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_XNX_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1_SpecSEI_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_PAN_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_LO_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HPDS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VH_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_VMIDBits_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR1_EL1_HAFDBS_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* ID_AA64MMFR2_EL1. Only the AT field is exposed to userspace. */
static const struct arm64_ftr_bits ftr_id_aa64mmfr2[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_E0PD_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_EVT_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_BBM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_TTL_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_FWB_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IDS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_AT_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_ST_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_NV_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CCIDX_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_VARange_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_IESB_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_LSM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_UAO_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR2_EL1_CnP_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* ID_AA64MMFR3_EL1. S1POE visibility depends on CONFIG_ARM64_POE. */
static const struct arm64_ftr_bits ftr_id_aa64mmfr3[] = {
 ARM64_FTR_BITS(FTR_VISIBLE_IF_IS_ENABLED(CONFIG_ARM64_POE),
         FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1POE_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_S1PIE_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_SCTLRX_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64MMFR3_EL1_TCRX_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* ID_AA64MMFR4_EL1. E2H0 is a signed field. */
static const struct arm64_ftr_bits ftr_id_aa64mmfr4[] = {
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_E2H0_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64MMFR4_EL1_NV_frac_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * CTR_EL0, the cache type register. Bit 31 is architecturally RES1 and
 * must match exactly; CWG/ERG sanitise to the higher of mismatched
 * values, treating zero specially (FTR_HIGHER_OR_ZERO_SAFE).
 */
static const struct arm64_ftr_bits ftr_ctr[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, 31, 1, 1), /* RES1 */
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DIC_SHIFT, 1, 1),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IDC_SHIFT, 1, 1),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_CWG_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_HIGHER_OR_ZERO_SAFE, CTR_EL0_ERG_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_DminLine_SHIFT, 4, 1),
 /*
  * Linux can handle differing I-cache policies. Userspace JITs will
  * make use of *minLine.
  * If we have differing I-cache policies, report it as the weakest - VIPT.
  */
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_EXACT, CTR_EL0_L1Ip_SHIFT, 2, CTR_EL0_L1Ip_VIPT), /* L1Ip */
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, CTR_EL0_IminLine_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* Default (empty) override for feature registers with no dedicated one. */
static struct arm64_ftr_override __ro_after_init no_override = { };

/* CTR_EL0 gets a standalone, globally-visible arm64_ftr_reg. */
struct arm64_ftr_reg arm64_ftr_reg_ctrel0 = {
 .name  = "SYS_CTR_EL0",
 .ftr_bits = ftr_ctr,
 .override = &no_override,
};

/*
 * AArch32 ID_MMFR0. InnerShr/OuterShr are signed fields whose safe
 * value is 0xf (i.e. the all-ones "reserved" encoding).
 */
static const struct arm64_ftr_bits ftr_id_mmfr0[] = {
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_InnerShr_SHIFT, 4, 0xf),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_FCSE_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_AuxReg_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_TCM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_ShareLvl_SHIFT, 4, 0),
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_OuterShr_SHIFT, 4, 0xf),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_PMSA_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR0_EL1_VMSA_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * ID_AA64DFR0_EL1: debug features. DebugVer must match exactly, with a
 * safe fallback of 0x6.
 */
static const struct arm64_ftr_bits ftr_id_aa64dfr0[] = {
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_DoubleLock_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_PMSVer_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_CTX_CMPs_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_WRPs_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_AA64DFR0_EL1_BRPs_SHIFT, 4, 0),
 /*
  * We can instantiate multiple PMU instances with different levels
  * of support.
  */
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_AA64DFR0_EL1_PMUVer_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, ID_AA64DFR0_EL1_DebugVer_SHIFT, 4, 0x6),
 ARM64_FTR_END,
};

/* AArch32 MVFR0: VFP features. Only FPDP is exposed to userspace. */
static const struct arm64_ftr_bits ftr_mvfr0[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPRound_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPShVec_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSqrt_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDivide_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPTrap_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPDP_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_FPSP_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR0_EL1_SIMDReg_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * AArch32 MVFR1_EL1: Advanced SIMD/FP feature fields. Most fields are
 * userspace-visible; all sanitise to the lowest common value.
 */
static const struct arm64_ftr_bits ftr_mvfr1[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDFMAC_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPHP_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDHP_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDSP_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDInt_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_SIMDLS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPDNaN_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR1_EL1_FPFtZ_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* AArch32 MVFR2_EL1: miscellaneous FP/SIMD features, hidden from userspace. */
static const struct arm64_ftr_bits ftr_mvfr2[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_FPMisc_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MVFR2_EL1_SIMDMisc_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * DCZID_EL0: DC ZVA block size (BS, lower-safe) and prohibition flag (DZP,
 * exact-match against safe_val 1). Userspace reads both via MRS emulation.
 */
static const struct arm64_ftr_bits ftr_dczid[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_EXACT, DCZID_EL0_DZP_SHIFT, 1, 1),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, DCZID_EL0_BS_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* GMID_EL1: MTE LDGM/STGM block size (BS); kernel-internal only. */
static const struct arm64_ftr_bits ftr_gmid[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, GMID_EL1_BS_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* AArch32 ID_ISAR0_EL1: basic instruction-set attributes, all hidden. */
static const struct arm64_ftr_bits ftr_id_isar0[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Divide_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Debug_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Coproc_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_CmpBranch_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitField_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_BitCount_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR0_EL1_Swap_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * AArch32 ID_ISAR5_EL1: crypto/CRC32 extensions are userspace-visible
 * (they back compat hwcaps); RDM and SEVL remain hidden.
 */
static const struct arm64_ftr_bits ftr_id_isar5[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_RDM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_CRC32_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA2_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SHA1_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_AES_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR5_EL1_SEVL_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* AArch32 ID_MMFR4_EL1: memory-model feature fields, all hidden. */
static const struct arm64_ftr_bits ftr_id_mmfr4[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_EVT_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CCIDX_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_LSM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_HPDS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_CnP_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_XNX_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR4_EL1_AC2_SHIFT, 4, 0),

 /*
  * SpecSEI = 1 indicates that the PE might generate an SError on an
  * external abort on speculative read. It is safer to assume that an
  * SError might be generated than that it will not be. Hence it has
  * been classified as FTR_HIGHER_SAFE.
  */
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_HIGHER_SAFE, ID_MMFR4_EL1_SpecSEI_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* AArch32 ID_ISAR4_EL1: instruction-set attributes, all hidden. */
static const struct arm64_ftr_bits ftr_id_isar4[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SWP_frac_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_PSR_M_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SynchPrim_frac_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Barrier_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_SMC_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Writeback_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_WithShifts_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR4_EL1_Unpriv_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* AArch32 ID_MMFR5_EL1: only the ETS field is tracked; hidden. */
static const struct arm64_ftr_bits ftr_id_mmfr5[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_MMFR5_EL1_ETS_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * AArch32 ID_ISAR6_EL1: most fields back compat hwcaps and are visible;
 * SPECRES and JSCVT stay hidden.
 */
static const struct arm64_ftr_bits ftr_id_isar6[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_I8MM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_BF16_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SPECRES_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_SB_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_FHM_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_DP_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_ISAR6_EL1_JSCVT_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * AArch32 ID_PFR0_EL1: processor feature fields. CSV2 is non-strict since
 * it may legitimately differ across CPUs (mitigation state).
 */
static const struct arm64_ftr_bits ftr_id_pfr0[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_DIT_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_CSV2_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State3_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State2_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State1_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR0_EL1_State0_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * AArch32 ID_PFR1_EL1: all fields hidden/strict. Several EL1-dependent
 * fields may later be relaxed by update_32bit_cpu_features() when AArch32
 * is absent at EL1.
 */
static const struct arm64_ftr_bits ftr_id_pfr1[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GIC_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virt_frac_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Sec_frac_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_GenTimer_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Virtualization_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_MProgMod_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_Security_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_PFR1_EL1_ProgMod_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * AArch32 ID_PFR2_EL1: SSBS is visible to userspace; both fields are
 * non-strict (mitigation-related, may differ across CPUs).
 */
static const struct arm64_ftr_bits ftr_id_pfr2[] = {
 ARM64_FTR_BITS(FTR_VISIBLE, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_SSBS_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, ID_PFR2_EL1_CSV3_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * AArch32 ID_DFR0_EL1: debug features. PerfMon is a signed field sanitised
 * to an exact safe value of 0 (mismatched PMU versions are tolerated,
 * non-strict); the remaining fields are hidden/strict lower-safe.
 */
static const struct arm64_ftr_bits ftr_id_dfr0[] = {
 /* [31:28] TraceFilt */
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_EXACT, ID_DFR0_EL1_PerfMon_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MProfDbg_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapTrc_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopTrc_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_MMapDbg_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopSDbg_SHIFT, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR0_EL1_CopDbg_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/* AArch32 ID_DFR1_EL1: MTPMU is a signed field; hidden/strict lower-safe. */
static const struct arm64_ftr_bits ftr_id_dfr1[] = {
 S_ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, ID_DFR1_EL1_MTPMU_SHIFT, 4, 0),
 ARM64_FTR_END,
};

/*
 * MPAMIDR_EL1: MPAM implementation limits. The MAX fields are non-strict
 * lower-safe (system value is the intersection); HAS_HCR is strict since
 * virtualisation support must match across CPUs.
 */
static const struct arm64_ftr_bits ftr_mpamidr[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PMG_MAX_SHIFT, MPAMIDR_EL1_PMG_MAX_WIDTH, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_VPMR_MAX_SHIFT, MPAMIDR_EL1_VPMR_MAX_WIDTH, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_HAS_HCR_SHIFT, 1, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_NONSTRICT, FTR_LOWER_SAFE, MPAMIDR_EL1_PARTID_MAX_SHIFT, MPAMIDR_EL1_PARTID_MAX_WIDTH, 0),
 ARM64_FTR_END,
};

/*
 * Common ftr bits for a 32bit register with all hidden, strict
 * attributes, with 4bit feature fields and a default safe value of
 * 0. Covers the following 32bit registers:
 * id_isar[1-3], id_mmfr[1-3]
 */

/* Eight contiguous 4-bit fields covering bits [31:0]; see comment above. */
static const struct arm64_ftr_bits ftr_generic_32bits[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 28, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 24, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 20, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 16, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 12, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 8, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 4, 4, 0),
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_LOWER_SAFE, 0, 4, 0),
 ARM64_FTR_END,
};

/*
 * Table for a single 32bit feature value treated as one exact-match field
 * (used for CNTFRQ_EL0, where any mismatch is an error).
 */
static const struct arm64_ftr_bits ftr_single32[] = {
 ARM64_FTR_BITS(FTR_HIDDEN, FTR_STRICT, FTR_EXACT, 0, 32, 0),
 ARM64_FTR_END,
};

/*
 * Empty table: no fields are described, so the whole register is treated
 * as RES0 for the system-wide value and must strictly match.
 */
static const struct arm64_ftr_bits ftr_raz[] = {
 ARM64_FTR_END,
};

/*
 * Helpers to build an arm64_ftr_regs[] entry: bind a sys_reg() encoding to
 * an anonymous arm64_ftr_reg carrying the field table and (optionally) a
 * command-line override descriptor. ARM64_FTR_REG() uses the shared
 * no_override placeholder.
 */
#define __ARM64_FTR_REG_OVERRIDE(id_str, id, table, ovr) { \
  .sys_id = id,     \
  .reg =  &(struct arm64_ftr_reg){  \
   .name = id_str,    \
   .override = (ovr),   \
   .ftr_bits = &((table)[0]),  \
 }}

#define ARM64_FTR_REG_OVERRIDE(id, table, ovr) \
 __ARM64_FTR_REG_OVERRIDE(#id, id, table, ovr)

#define ARM64_FTR_REG(id, table)  \
 __ARM64_FTR_REG_OVERRIDE(#id, id, table, &no_override)

/*
 * Per-register override state, populated early from the kernel command line
 * (e.g. "arm64.nosve") before being applied in init_cpu_ftr_reg().
 */
struct arm64_ftr_override __read_mostly id_aa64mmfr0_override;
struct arm64_ftr_override __read_mostly id_aa64mmfr1_override;
struct arm64_ftr_override __read_mostly id_aa64mmfr2_override;
struct arm64_ftr_override __read_mostly id_aa64pfr0_override;
struct arm64_ftr_override __read_mostly id_aa64pfr1_override;
struct arm64_ftr_override __read_mostly id_aa64zfr0_override;
struct arm64_ftr_override __read_mostly id_aa64smfr0_override;
struct arm64_ftr_override __read_mostly id_aa64isar1_override;
struct arm64_ftr_override __read_mostly id_aa64isar2_override;

/* Override state for software-defined (not architectural) features. */
struct arm64_ftr_override __read_mostly arm64_sw_feature_override;

/*
 * Master table of tracked ID registers. Entries MUST remain sorted in
 * ascending sys_id order: get_arm64_ftr_reg_nowarn() binary-searches this
 * array, and sort_ftr_regs() BUG()s on any ordering violation at boot.
 */
static const struct __ftr_reg_entry {
 u32   sys_id;
 struct arm64_ftr_reg  *reg;
} arm64_ftr_regs[] = {

 /* Op1 = 0, CRn = 0, CRm = 1 */
 ARM64_FTR_REG(SYS_ID_PFR0_EL1, ftr_id_pfr0),
 ARM64_FTR_REG(SYS_ID_PFR1_EL1, ftr_id_pfr1),
 ARM64_FTR_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0),
 ARM64_FTR_REG(SYS_ID_MMFR0_EL1, ftr_id_mmfr0),
 ARM64_FTR_REG(SYS_ID_MMFR1_EL1, ftr_generic_32bits),
 ARM64_FTR_REG(SYS_ID_MMFR2_EL1, ftr_generic_32bits),
 ARM64_FTR_REG(SYS_ID_MMFR3_EL1, ftr_generic_32bits),

 /* Op1 = 0, CRn = 0, CRm = 2 */
 ARM64_FTR_REG(SYS_ID_ISAR0_EL1, ftr_id_isar0),
 ARM64_FTR_REG(SYS_ID_ISAR1_EL1, ftr_generic_32bits),
 ARM64_FTR_REG(SYS_ID_ISAR2_EL1, ftr_generic_32bits),
 ARM64_FTR_REG(SYS_ID_ISAR3_EL1, ftr_generic_32bits),
 ARM64_FTR_REG(SYS_ID_ISAR4_EL1, ftr_id_isar4),
 ARM64_FTR_REG(SYS_ID_ISAR5_EL1, ftr_id_isar5),
 ARM64_FTR_REG(SYS_ID_MMFR4_EL1, ftr_id_mmfr4),
 ARM64_FTR_REG(SYS_ID_ISAR6_EL1, ftr_id_isar6),

 /* Op1 = 0, CRn = 0, CRm = 3 */
 ARM64_FTR_REG(SYS_MVFR0_EL1, ftr_mvfr0),
 ARM64_FTR_REG(SYS_MVFR1_EL1, ftr_mvfr1),
 ARM64_FTR_REG(SYS_MVFR2_EL1, ftr_mvfr2),
 ARM64_FTR_REG(SYS_ID_PFR2_EL1, ftr_id_pfr2),
 ARM64_FTR_REG(SYS_ID_DFR1_EL1, ftr_id_dfr1),
 ARM64_FTR_REG(SYS_ID_MMFR5_EL1, ftr_id_mmfr5),

 /* Op1 = 0, CRn = 0, CRm = 4 */
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0,
          &id_aa64pfr0_override),
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1,
          &id_aa64pfr1_override),
 ARM64_FTR_REG(SYS_ID_AA64PFR2_EL1, ftr_id_aa64pfr2),
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0,
          &id_aa64zfr0_override),
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64SMFR0_EL1, ftr_id_aa64smfr0,
          &id_aa64smfr0_override),
 ARM64_FTR_REG(SYS_ID_AA64FPFR0_EL1, ftr_id_aa64fpfr0),

 /* Op1 = 0, CRn = 0, CRm = 5 */
 ARM64_FTR_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0),
 ARM64_FTR_REG(SYS_ID_AA64DFR1_EL1, ftr_raz),

 /* Op1 = 0, CRn = 0, CRm = 6 */
 ARM64_FTR_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0),
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1,
          &id_aa64isar1_override),
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2,
          &id_aa64isar2_override),
 ARM64_FTR_REG(SYS_ID_AA64ISAR3_EL1, ftr_id_aa64isar3),

 /* Op1 = 0, CRn = 0, CRm = 7 */
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0,
          &id_aa64mmfr0_override),
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1,
          &id_aa64mmfr1_override),
 ARM64_FTR_REG_OVERRIDE(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2,
          &id_aa64mmfr2_override),
 ARM64_FTR_REG(SYS_ID_AA64MMFR3_EL1, ftr_id_aa64mmfr3),
 ARM64_FTR_REG(SYS_ID_AA64MMFR4_EL1, ftr_id_aa64mmfr4),

 /* Op1 = 0, CRn = 10, CRm = 4 */
 ARM64_FTR_REG(SYS_MPAMIDR_EL1, ftr_mpamidr),

 /* Op1 = 1, CRn = 0, CRm = 0 */
 ARM64_FTR_REG(SYS_GMID_EL1, ftr_gmid),

 /* Op1 = 3, CRn = 0, CRm = 0 */
 { SYS_CTR_EL0, &arm64_ftr_reg_ctrel0 },
 ARM64_FTR_REG(SYS_DCZID_EL0, ftr_dczid),

 /* Op1 = 3, CRn = 14, CRm = 0 */
 ARM64_FTR_REG(SYS_CNTFRQ_EL0, ftr_single32),
};

/*
 * bsearch() comparator for arm64_ftr_regs: the key is a sys_reg() encoding
 * smuggled through the pointer argument; compare it against the entry's
 * sys_id.
 */
static int search_cmp_ftr_reg(const void *id, const void *regp)
{
	const struct __ftr_reg_entry *entry = regp;

	return (int)(unsigned long)id - (int)entry->sys_id;
}

/*
 * get_arm64_ftr_reg_nowarn - Looks up a feature register entry using
 * its sys_reg() encoding. With the array arm64_ftr_regs sorted in the
 * ascending order of sys_id, we use binary search to find a matching
 * entry.
 *
 * returns - Upon success,  matching ftr_reg entry for id.
 *         - NULL on failure. It is upto the caller to decide
 *      the impact of a failure.
 */

static struct arm64_ftr_reg *get_arm64_ftr_reg_nowarn(u32 sys_id)
{
	const struct __ftr_reg_entry *entry;

	/* arm64_ftr_regs is sorted by sys_id; see sort_ftr_regs(). */
	entry = bsearch((const void *)(unsigned long)sys_id,
			arm64_ftr_regs, ARRAY_SIZE(arm64_ftr_regs),
			sizeof(arm64_ftr_regs[0]), search_cmp_ftr_reg);

	return entry ? entry->reg : NULL;
}

/*
 * get_arm64_ftr_reg - Looks up a feature register entry using
 * its sys_reg() encoding. This calls get_arm64_ftr_reg_nowarn().
 *
 * returns - Upon success,  matching ftr_reg entry for id.
 *         - NULL on failure but with an WARN_ON().
 */

struct arm64_ftr_reg *get_arm64_ftr_reg(u32 sys_id)
{
	struct arm64_ftr_reg *reg = get_arm64_ftr_reg_nowarn(sys_id);

	/*
	 * Looking up a register we don't track is a caller bug: warn once
	 * here, then hand the NULL back for the caller to deal with.
	 */
	WARN_ON(!reg);

	return reg;
}

/*
 * Return @reg with the field described by @ftrp replaced by @ftr_val;
 * all other bits are preserved.
 */
static u64 arm64_ftr_set_value(const struct arm64_ftr_bits *ftrp, s64 reg,
			       s64 ftr_val)
{
	u64 mask = arm64_ftr_mask(ftrp);

	return (reg & ~mask) | ((ftr_val << ftrp->shift) & mask);
}

/*
 * Combine a new CPU's field value with the current system-wide value and
 * return the "safe" (lowest common denominator) result, according to the
 * field's sanitisation policy.
 */
s64 arm64_ftr_safe_value(const struct arm64_ftr_bits *ftrp, s64 new,
		s64 cur)
{
	if (ftrp->type == FTR_EXACT)
		return ftrp->safe_val;

	if (ftrp->type == FTR_LOWER_SAFE)
		return min(new, cur);

	if (ftrp->type == FTR_HIGHER_OR_ZERO_SAFE) {
		/* Either side being zero forces the safe value to zero. */
		if (!cur || !new)
			return 0;
		return max(new, cur);
	}

	if (ftrp->type == FTR_HIGHER_SAFE)
		return max(new, cur);

	/* Unknown sanitisation policy: a table bug, not a runtime state. */
	BUG();

	return 0;
}

/*
 * Boot-time sanity check of the arm64_ftr_regs table:
 *  - within each register, feature fields must be in descending shift
 *    order, must not overlap, and must fit in 64 bits (WARN otherwise);
 *  - the register entries themselves must be in ascending sys_id order,
 *    since get_arm64_ftr_reg_nowarn() relies on binary search (BUG_ON
 *    otherwise).
 */
static void __init sort_ftr_regs(void)
{
 unsigned int i;

 for (i = 0; i < ARRAY_SIZE(arm64_ftr_regs); i++) {
  const struct arm64_ftr_reg *ftr_reg = arm64_ftr_regs[i].reg;
  const struct arm64_ftr_bits *ftr_bits = ftr_reg->ftr_bits;
  unsigned int j = 0;

  /*
   * Features here must be sorted in descending order with respect
   * to their shift values and should not overlap with each other.
   */
  for (; ftr_bits->width != 0; ftr_bits++, j++) {
   unsigned int width = ftr_reg->ftr_bits[j].width;
   unsigned int shift = ftr_reg->ftr_bits[j].shift;
   unsigned int prev_shift;

   WARN((shift  + width) > 64,
    "%s has invalid feature at shift %d\n",
    ftr_reg->name, shift);

   /*
    * Skip the first feature. There is nothing to
    * compare against for now.
    */
   if (j == 0)
    continue;

   /* Fields are descending: this one must end below the previous. */
   prev_shift = ftr_reg->ftr_bits[j - 1].shift;
   WARN((shift + width) > prev_shift,
    "%s has feature overlap at shift %d\n",
    ftr_reg->name, shift);
  }

  /*
   * Skip the first register. There is nothing to
   * compare against for now.
   */
  if (i == 0)
   continue;
  /*
   * Registers here must be sorted in ascending order with respect
   * to sys_id for subsequent binary search in get_arm64_ftr_reg()
   * to work correctly.
   */
  BUG_ON(arm64_ftr_regs[i].sys_id <= arm64_ftr_regs[i - 1].sys_id);
 }
}

/*
 * Initialise the CPU feature register from Boot CPU values.
 * Also initiliases the strict_mask for the register.
 * Any bits that are not covered by an arm64_ftr_bits entry are considered
 * RES0 for the system-wide value, and must strictly match.
 */

static void init_cpu_ftr_reg(u32 sys_reg, u64 new)
{
 u64 val = 0;
 u64 strict_mask = ~0x0ULL;
 u64 user_mask = 0;
 u64 valid_mask = 0;

 const struct arm64_ftr_bits *ftrp;
 struct arm64_ftr_reg *reg = get_arm64_ftr_reg(sys_reg);

 if (!reg)
  return;

 for (ftrp = reg->ftr_bits; ftrp->width; ftrp++) {
  u64 ftr_mask = arm64_ftr_mask(ftrp);
  s64 ftr_new = arm64_ftr_value(ftrp, new);
  s64 ftr_ovr = arm64_ftr_value(ftrp, reg->override->val);

  /* Command-line override fully covers this field? */
  if ((ftr_mask & reg->override->mask) == ftr_mask) {
   s64 tmp = arm64_ftr_safe_value(ftrp, ftr_ovr, ftr_new);
   char *str = NULL;

   if (ftr_ovr != tmp) {
    /*
     * Unsafe: the override would advertise more than
     * the hardware provides. Drop the override for
     * this field so later CPUs aren't affected either.
     */
    reg->override->mask &= ~ftr_mask;
    reg->override->val &= ~ftr_mask;
    tmp = ftr_ovr;
    str = "ignoring override";
   } else if (ftr_new != tmp) {
    /* Override was valid */
    ftr_new = tmp;
    str = "forced";
   } else {
    /* Override was the safe value */
    str = "already set";
   }

   pr_warn("%s[%d:%d]: %s to %llx\n",
    reg->name,
    ftrp->shift + ftrp->width - 1,
    ftrp->shift, str,
    tmp & (BIT(ftrp->width) - 1));
  } else if ((ftr_mask & reg->override->val) == ftr_mask) {
   /* Value bits set without matching mask bits: nonsense. */
   reg->override->val &= ~ftr_mask;
   pr_warn("%s[%d:%d]: impossible override, ignored\n",
    reg->name,
    ftrp->shift + ftrp->width - 1,
    ftrp->shift);
  }

  val = arm64_ftr_set_value(ftrp, val, ftr_new);

  /* Accumulate the per-field masks describing this register. */
  valid_mask |= ftr_mask;
  if (!ftrp->strict)
   strict_mask &= ~ftr_mask;
  if (ftrp->visible)
   user_mask |= ftr_mask;
  else
   /* Hidden fields read back as their safe value for userspace. */
   reg->user_val = arm64_ftr_set_value(ftrp,
           reg->user_val,
           ftrp->safe_val);
 }

 /* Bits not covered by any ftr_bits entry are treated as RES0. */
 val &= valid_mask;

 reg->sys_val = val;
 reg->strict_mask = strict_mask;
 reg->user_mask = user_mask;
}

extern const struct arm64_cpu_capabilities arm64_errata[];
static const struct arm64_cpu_capabilities arm64_features[];

/*
 * Populate cpucap_ptrs[] from a NULL-terminated capability array, warning
 * about (and skipping) out-of-range or duplicate capability numbers.
 */
static void __init
init_cpucap_indirect_list_from_array(const struct arm64_cpu_capabilities *caps)
{
	const struct arm64_cpu_capabilities *cap;

	for (cap = caps; cap->matches; cap++) {
		if (WARN(cap->capability >= ARM64_NCAPS,
			 "Invalid capability %d\n", cap->capability))
			continue;
		if (WARN(cpucap_ptrs[cap->capability],
			 "Duplicate entry for capability %d\n",
			 cap->capability))
			continue;
		cpucap_ptrs[cap->capability] = cap;
	}
}

/* Build the capability-number -> descriptor table from both cap arrays. */
static void __init init_cpucap_indirect_list(void)
{
 init_cpucap_indirect_list_from_array(arm64_features);
 init_cpucap_indirect_list_from_array(arm64_errata);
}

static void __init setup_boot_cpu_capabilities(void);

/*
 * Snapshot all AArch32 ID registers from @info into the system-wide
 * feature register state (boot-CPU initialisation path).
 */
static void init_32bit_cpu_features(struct cpuinfo_32bit *info)
{
 init_cpu_ftr_reg(SYS_ID_DFR0_EL1, info->reg_id_dfr0);
 init_cpu_ftr_reg(SYS_ID_DFR1_EL1, info->reg_id_dfr1);
 init_cpu_ftr_reg(SYS_ID_ISAR0_EL1, info->reg_id_isar0);
 init_cpu_ftr_reg(SYS_ID_ISAR1_EL1, info->reg_id_isar1);
 init_cpu_ftr_reg(SYS_ID_ISAR2_EL1, info->reg_id_isar2);
 init_cpu_ftr_reg(SYS_ID_ISAR3_EL1, info->reg_id_isar3);
 init_cpu_ftr_reg(SYS_ID_ISAR4_EL1, info->reg_id_isar4);
 init_cpu_ftr_reg(SYS_ID_ISAR5_EL1, info->reg_id_isar5);
 init_cpu_ftr_reg(SYS_ID_ISAR6_EL1, info->reg_id_isar6);
 init_cpu_ftr_reg(SYS_ID_MMFR0_EL1, info->reg_id_mmfr0);
 init_cpu_ftr_reg(SYS_ID_MMFR1_EL1, info->reg_id_mmfr1);
 init_cpu_ftr_reg(SYS_ID_MMFR2_EL1, info->reg_id_mmfr2);
 init_cpu_ftr_reg(SYS_ID_MMFR3_EL1, info->reg_id_mmfr3);
 init_cpu_ftr_reg(SYS_ID_MMFR4_EL1, info->reg_id_mmfr4);
 init_cpu_ftr_reg(SYS_ID_MMFR5_EL1, info->reg_id_mmfr5);
 init_cpu_ftr_reg(SYS_ID_PFR0_EL1, info->reg_id_pfr0);
 init_cpu_ftr_reg(SYS_ID_PFR1_EL1, info->reg_id_pfr1);
 init_cpu_ftr_reg(SYS_ID_PFR2_EL1, info->reg_id_pfr2);
 init_cpu_ftr_reg(SYS_MVFR0_EL1, info->reg_mvfr0);
 init_cpu_ftr_reg(SYS_MVFR1_EL1, info->reg_mvfr1);
 init_cpu_ftr_reg(SYS_MVFR2_EL1, info->reg_mvfr2);
}

#ifdef CONFIG_ARM64_PSEUDO_NMI
/* Set via the "irqchip.gicv3_pseudo_nmi" early parameter. */
static bool enable_pseudo_nmi;

/* Parse "irqchip.gicv3_pseudo_nmi=<bool>" from the command line. */
static int __init early_enable_pseudo_nmi(char *p)
{
 return kstrtobool(p, &enable_pseudo_nmi);
}
early_param("irqchip.gicv3_pseudo_nmi", early_enable_pseudo_nmi);

/*
 * Veto pseudo-NMI on platforms known to break it, even when the user
 * requested it on the command line.
 */
static __init void detect_system_supports_pseudo_nmi(void)
{
 struct device_node *np;

 if (!enable_pseudo_nmi)
  return;

 /*
  * Detect broken MediaTek firmware that doesn't properly save and
  * restore GIC priorities.
  */
 np = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
 if (np && of_property_read_bool(np, "mediatek,broken-save-restore-fw")) {
  pr_info("Pseudo-NMI disabled due to MediaTek Chromebook GICR save problem\n");
  enable_pseudo_nmi = false;
 }
 /* of_node_put() tolerates NULL, so no need to guard the lookup failure. */
 of_node_put(np);
}
#else /* CONFIG_ARM64_PSEUDO_NMI */
static inline void detect_system_supports_pseudo_nmi(void) { }
#endif

/*
 * Seed the system-wide feature register state from the boot CPU's ID
 * registers, and probe optional state (SVE/SME vector lengths, MPAM, MTE)
 * that is only meaningful when the corresponding feature is present.
 */
void __init init_cpu_features(struct cpuinfo_arm64 *info)
{
 /* Before we start using the tables, make sure it is sorted */
 sort_ftr_regs();

 init_cpu_ftr_reg(SYS_CTR_EL0, info->reg_ctr);
 init_cpu_ftr_reg(SYS_DCZID_EL0, info->reg_dczid);
 init_cpu_ftr_reg(SYS_CNTFRQ_EL0, info->reg_cntfrq);
 init_cpu_ftr_reg(SYS_ID_AA64DFR0_EL1, info->reg_id_aa64dfr0);
 init_cpu_ftr_reg(SYS_ID_AA64DFR1_EL1, info->reg_id_aa64dfr1);
 init_cpu_ftr_reg(SYS_ID_AA64ISAR0_EL1, info->reg_id_aa64isar0);
 init_cpu_ftr_reg(SYS_ID_AA64ISAR1_EL1, info->reg_id_aa64isar1);
 init_cpu_ftr_reg(SYS_ID_AA64ISAR2_EL1, info->reg_id_aa64isar2);
 init_cpu_ftr_reg(SYS_ID_AA64ISAR3_EL1, info->reg_id_aa64isar3);
 init_cpu_ftr_reg(SYS_ID_AA64MMFR0_EL1, info->reg_id_aa64mmfr0);
 init_cpu_ftr_reg(SYS_ID_AA64MMFR1_EL1, info->reg_id_aa64mmfr1);
 init_cpu_ftr_reg(SYS_ID_AA64MMFR2_EL1, info->reg_id_aa64mmfr2);
 init_cpu_ftr_reg(SYS_ID_AA64MMFR3_EL1, info->reg_id_aa64mmfr3);
 init_cpu_ftr_reg(SYS_ID_AA64MMFR4_EL1, info->reg_id_aa64mmfr4);
 init_cpu_ftr_reg(SYS_ID_AA64PFR0_EL1, info->reg_id_aa64pfr0);
 init_cpu_ftr_reg(SYS_ID_AA64PFR1_EL1, info->reg_id_aa64pfr1);
 init_cpu_ftr_reg(SYS_ID_AA64PFR2_EL1, info->reg_id_aa64pfr2);
 init_cpu_ftr_reg(SYS_ID_AA64ZFR0_EL1, info->reg_id_aa64zfr0);
 init_cpu_ftr_reg(SYS_ID_AA64SMFR0_EL1, info->reg_id_aa64smfr0);
 init_cpu_ftr_reg(SYS_ID_AA64FPFR0_EL1, info->reg_id_aa64fpfr0);

 /* AArch32 ID registers are only meaningful with 32-bit EL0 support. */
 if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0))
  init_32bit_cpu_features(&info->aarch32);

 /* Probe SVE vector lengths, with SVE temporarily enabled in CPACR. */
 if (IS_ENABLED(CONFIG_ARM64_SVE) &&
     id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
  unsigned long cpacr = cpacr_save_enable_kernel_sve();

  vec_init_vq_map(ARM64_VEC_SVE);

  cpacr_restore(cpacr);
 }

 /* Likewise for SME vector lengths. */
 if (IS_ENABLED(CONFIG_ARM64_SME) &&
     id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
  unsigned long cpacr = cpacr_save_enable_kernel_sme();

  vec_init_vq_map(ARM64_VEC_SME);

  cpacr_restore(cpacr);
 }

 /* MPAMIDR_EL1 is only accessible when MPAM is implemented. */
 if (id_aa64pfr0_mpam(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
  info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
  init_cpu_ftr_reg(SYS_MPAMIDR_EL1, info->reg_mpamidr);
 }

 /* GMID_EL1 is only meaningful when MTE is implemented. */
 if (id_aa64pfr1_mte(info->reg_id_aa64pfr1))
  init_cpu_ftr_reg(SYS_GMID_EL1, info->reg_gmid);
}

/*
 * Fold a secondary CPU's register value @new into @reg's system-wide
 * snapshot, field by field, keeping the "safe" value for each mismatch.
 */
static void update_cpu_ftr_reg(struct arm64_ftr_reg *reg, u64 new)
{
	const struct arm64_ftr_bits *ftrp = reg->ftr_bits;

	for (; ftrp->width; ftrp++) {
		s64 cur_field = arm64_ftr_value(ftrp, reg->sys_val);
		s64 new_field = arm64_ftr_value(ftrp, new);

		if (new_field == cur_field)
			continue;

		/* Find a safe value */
		new_field = arm64_ftr_safe_value(ftrp, new_field, cur_field);
		reg->sys_val = arm64_ftr_set_value(ftrp, reg->sys_val,
						   new_field);
	}
}

/*
 * Sanitise @sys_id's system-wide value with @val from CPU @cpu, and report
 * whether any strict field differed from the boot CPU (1 = taint, 0 = OK).
 */
static int check_update_ftr_reg(u32 sys_id, int cpu, u64 val, u64 boot)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
	u64 mask;

	if (!regp)
		return 0;

	update_cpu_ftr_reg(regp, val);

	mask = regp->strict_mask;
	if ((boot & mask) == (val & mask))
		return 0;

	pr_warn("SANITY CHECK: Unexpected variation in %s. Boot CPU: %#016llx, CPU%d: %#016llx\n",
		regp->name, boot, cpu, val);
	return 1;
}

/*
 * Drop the strictness of the field at shift @field in register @sys_id so
 * that a mismatch there no longer triggers a SANITY CHECK warning.
 */
static void relax_cpu_ftr_reg(u32 sys_id, int field)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(sys_id);
	const struct arm64_ftr_bits *ftrp;

	if (!regp)
		return;

	/* Walk to the entry whose shift matches, or to the terminator. */
	ftrp = regp->ftr_bits;
	while (ftrp->width && ftrp->shift != field)
		ftrp++;

	if (ftrp->width)
		regp->strict_mask &= ~arm64_ftr_mask(ftrp);

	/* Bogus field? */
	WARN_ON(!ftrp->width);
}

/*
 * With allow_mismatched_32bit_el0, the boot CPU may lack 32-bit EL0. The
 * first online 32-bit-capable CPU donates its AArch32 register state to
 * the boot CPU's snapshot; a static flag makes this a one-shot operation.
 */
static void lazy_init_32bit_cpu_features(struct cpuinfo_arm64 *info,
          struct cpuinfo_arm64 *boot)
{
 static bool boot_cpu_32bit_regs_overridden = false;

 if (!allow_mismatched_32bit_el0 || boot_cpu_32bit_regs_overridden)
  return;

 /* Boot CPU has 32-bit EL0 itself: its own snapshot is already valid. */
 if (id_aa64pfr0_32bit_el0(boot->reg_id_aa64pfr0))
  return;

 boot->aarch32 = info->aarch32;
 init_32bit_cpu_features(&boot->aarch32);
 boot_cpu_32bit_regs_overridden = true;
}

/*
 * Sanitise all AArch32 ID registers for CPU @cpu against the boot CPU's
 * values. Returns non-zero if any strict field mismatched (caller taints).
 */
static int update_32bit_cpu_features(int cpu, struct cpuinfo_32bit *info,
         struct cpuinfo_32bit *boot)
{
 int taint = 0;
 u64 pfr0 = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);

 /*
  * If we don't have AArch32 at EL1, then relax the strictness of
  * EL1-dependent register fields to avoid spurious sanity check fails.
  */
 if (!id_aa64pfr0_32bit_el1(pfr0)) {
  relax_cpu_ftr_reg(SYS_ID_ISAR4_EL1, ID_ISAR4_EL1_SMC_SHIFT);
  relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Virt_frac_SHIFT);
  relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Sec_frac_SHIFT);
  relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Virtualization_SHIFT);
  relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_Security_SHIFT);
  relax_cpu_ftr_reg(SYS_ID_PFR1_EL1, ID_PFR1_EL1_ProgMod_SHIFT);
 }

 taint |= check_update_ftr_reg(SYS_ID_DFR0_EL1, cpu,
          info->reg_id_dfr0, boot->reg_id_dfr0);
 taint |= check_update_ftr_reg(SYS_ID_DFR1_EL1, cpu,
          info->reg_id_dfr1, boot->reg_id_dfr1);
 taint |= check_update_ftr_reg(SYS_ID_ISAR0_EL1, cpu,
          info->reg_id_isar0, boot->reg_id_isar0);
 taint |= check_update_ftr_reg(SYS_ID_ISAR1_EL1, cpu,
          info->reg_id_isar1, boot->reg_id_isar1);
 taint |= check_update_ftr_reg(SYS_ID_ISAR2_EL1, cpu,
          info->reg_id_isar2, boot->reg_id_isar2);
 taint |= check_update_ftr_reg(SYS_ID_ISAR3_EL1, cpu,
          info->reg_id_isar3, boot->reg_id_isar3);
 taint |= check_update_ftr_reg(SYS_ID_ISAR4_EL1, cpu,
          info->reg_id_isar4, boot->reg_id_isar4);
 taint |= check_update_ftr_reg(SYS_ID_ISAR5_EL1, cpu,
          info->reg_id_isar5, boot->reg_id_isar5);
 taint |= check_update_ftr_reg(SYS_ID_ISAR6_EL1, cpu,
          info->reg_id_isar6, boot->reg_id_isar6);

 /*
  * Regardless of the value of the AuxReg field, the AIFSR, ADFSR, and
  * ACTLR formats could differ across CPUs and therefore would have to
  * be trapped for virtualization anyway.
  */
 taint |= check_update_ftr_reg(SYS_ID_MMFR0_EL1, cpu,
          info->reg_id_mmfr0, boot->reg_id_mmfr0);
 taint |= check_update_ftr_reg(SYS_ID_MMFR1_EL1, cpu,
          info->reg_id_mmfr1, boot->reg_id_mmfr1);
 taint |= check_update_ftr_reg(SYS_ID_MMFR2_EL1, cpu,
          info->reg_id_mmfr2, boot->reg_id_mmfr2);
 taint |= check_update_ftr_reg(SYS_ID_MMFR3_EL1, cpu,
          info->reg_id_mmfr3, boot->reg_id_mmfr3);
 taint |= check_update_ftr_reg(SYS_ID_MMFR4_EL1, cpu,
          info->reg_id_mmfr4, boot->reg_id_mmfr4);
 taint |= check_update_ftr_reg(SYS_ID_MMFR5_EL1, cpu,
          info->reg_id_mmfr5, boot->reg_id_mmfr5);
 taint |= check_update_ftr_reg(SYS_ID_PFR0_EL1, cpu,
          info->reg_id_pfr0, boot->reg_id_pfr0);
 taint |= check_update_ftr_reg(SYS_ID_PFR1_EL1, cpu,
          info->reg_id_pfr1, boot->reg_id_pfr1);
 taint |= check_update_ftr_reg(SYS_ID_PFR2_EL1, cpu,
          info->reg_id_pfr2, boot->reg_id_pfr2);
 taint |= check_update_ftr_reg(SYS_MVFR0_EL1, cpu,
          info->reg_mvfr0, boot->reg_mvfr0);
 taint |= check_update_ftr_reg(SYS_MVFR1_EL1, cpu,
          info->reg_mvfr1, boot->reg_mvfr1);
 taint |= check_update_ftr_reg(SYS_MVFR2_EL1, cpu,
          info->reg_mvfr2, boot->reg_mvfr2);

 return taint;
}

/*
 * Update system wide CPU feature registers with the values from a
 * non-boot CPU. Also performs SANITY checks to make sure that there
 * aren't any insane variations from that of the boot CPU.
 */

void update_cpu_features(int cpu,
    struct cpuinfo_arm64 *info,
    struct cpuinfo_arm64 *boot)
{
 int taint = 0;

 /*
  * The kernel can handle differing I-cache policies, but otherwise
  * caches should look identical. Userspace JITs will make use of
  * *minLine.
  */
 taint |= check_update_ftr_reg(SYS_CTR_EL0, cpu,
          info->reg_ctr, boot->reg_ctr);

 /*
  * Userspace may perform DC ZVA instructions. Mismatched block sizes
  * could result in too much or too little memory being zeroed if a
  * process is preempted and migrated between CPUs.
  */
 taint |= check_update_ftr_reg(SYS_DCZID_EL0, cpu,
          info->reg_dczid, boot->reg_dczid);

 /* If different, timekeeping will be broken (especially with KVM) */
 taint |= check_update_ftr_reg(SYS_CNTFRQ_EL0, cpu,
          info->reg_cntfrq, boot->reg_cntfrq);

 /*
  * The kernel uses self-hosted debug features and expects CPUs to
  * support identical debug features. We presently need CTX_CMPs, WRPs,
  * and BRPs to be identical.
  * ID_AA64DFR1 is currently RES0.
  */
 taint |= check_update_ftr_reg(SYS_ID_AA64DFR0_EL1, cpu,
          info->reg_id_aa64dfr0, boot->reg_id_aa64dfr0);
 taint |= check_update_ftr_reg(SYS_ID_AA64DFR1_EL1, cpu,
          info->reg_id_aa64dfr1, boot->reg_id_aa64dfr1);
 /*
  * Even in big.LITTLE, processors should be identical instruction-set
  * wise.
  */
 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR0_EL1, cpu,
          info->reg_id_aa64isar0, boot->reg_id_aa64isar0);
 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR1_EL1, cpu,
          info->reg_id_aa64isar1, boot->reg_id_aa64isar1);
 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR2_EL1, cpu,
          info->reg_id_aa64isar2, boot->reg_id_aa64isar2);
 taint |= check_update_ftr_reg(SYS_ID_AA64ISAR3_EL1, cpu,
          info->reg_id_aa64isar3, boot->reg_id_aa64isar3);

 /*
  * Differing PARange support is fine as long as all peripherals and
  * memory are mapped within the minimum PARange of all CPUs.
  * Linux should not care about secure memory.
  */
 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR0_EL1, cpu,
          info->reg_id_aa64mmfr0, boot->reg_id_aa64mmfr0);
 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR1_EL1, cpu,
          info->reg_id_aa64mmfr1, boot->reg_id_aa64mmfr1);
 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR2_EL1, cpu,
          info->reg_id_aa64mmfr2, boot->reg_id_aa64mmfr2);
 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR3_EL1, cpu,
          info->reg_id_aa64mmfr3, boot->reg_id_aa64mmfr3);
 taint |= check_update_ftr_reg(SYS_ID_AA64MMFR4_EL1, cpu,
          info->reg_id_aa64mmfr4, boot->reg_id_aa64mmfr4);

 taint |= check_update_ftr_reg(SYS_ID_AA64PFR0_EL1, cpu,
          info->reg_id_aa64pfr0, boot->reg_id_aa64pfr0);
 taint |= check_update_ftr_reg(SYS_ID_AA64PFR1_EL1, cpu,
          info->reg_id_aa64pfr1, boot->reg_id_aa64pfr1);
 taint |= check_update_ftr_reg(SYS_ID_AA64PFR2_EL1, cpu,
          info->reg_id_aa64pfr2, boot->reg_id_aa64pfr2);

 taint |= check_update_ftr_reg(SYS_ID_AA64ZFR0_EL1, cpu,
          info->reg_id_aa64zfr0, boot->reg_id_aa64zfr0);

 taint |= check_update_ftr_reg(SYS_ID_AA64SMFR0_EL1, cpu,
          info->reg_id_aa64smfr0, boot->reg_id_aa64smfr0);

 taint |= check_update_ftr_reg(SYS_ID_AA64FPFR0_EL1, cpu,
          info->reg_id_aa64fpfr0, boot->reg_id_aa64fpfr0);

 /* Probe vector lengths */
 if (IS_ENABLED(CONFIG_ARM64_SVE) &&
     id_aa64pfr0_sve(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
  if (!system_capabilities_finalized()) {
   unsigned long cpacr = cpacr_save_enable_kernel_sve();

   vec_update_vq_map(ARM64_VEC_SVE);

   cpacr_restore(cpacr);
  }
 }

 if (IS_ENABLED(CONFIG_ARM64_SME) &&
     id_aa64pfr1_sme(read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1))) {
  unsigned long cpacr = cpacr_save_enable_kernel_sme();

  /* Probe vector lengths */
  if (!system_capabilities_finalized())
   vec_update_vq_map(ARM64_VEC_SME);

  cpacr_restore(cpacr);
 }

 /* MPAMIDR_EL1 is only accessible when MPAM is implemented. */
 if (id_aa64pfr0_mpam(read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1))) {
  info->reg_mpamidr = read_cpuid(MPAMIDR_EL1);
  taint |= check_update_ftr_reg(SYS_MPAMIDR_EL1, cpu,
     info->reg_mpamidr, boot->reg_mpamidr);
 }

 /*
  * The kernel uses the LDGM/STGM instructions and the number of tags
  * they read/write depends on the GMID_EL1.BS field. Check that the
  * value is the same on all CPUs.
  */
 if (IS_ENABLED(CONFIG_ARM64_MTE) &&
     id_aa64pfr1_mte(info->reg_id_aa64pfr1)) {
  taint |= check_update_ftr_reg(SYS_GMID_EL1, cpu,
           info->reg_gmid, boot->reg_gmid);
 }

 /*
  * If we don't have AArch32 at all then skip the checks entirely
  * as the register values may be UNKNOWN and we're not going to be
  * using them for anything.
  *
  * This relies on a sanitised view of the AArch64 ID registers
  * (e.g. SYS_ID_AA64PFR0_EL1), so we call it last.
  */
 if (id_aa64pfr0_32bit_el0(info->reg_id_aa64pfr0)) {
  lazy_init_32bit_cpu_features(info, boot);
  taint |= update_32bit_cpu_features(cpu, &info->aarch32,
         &boot->aarch32);
 }

 /*
  * Mismatched CPU features are a recipe for disaster. Don't even
  * pretend to support them.
  */
 if (taint) {
  pr_warn_once("Unsupported CPU feature variation detected.\n");
  add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
 }
}

/*
 * Return the sanitised (system-wide) value of the ID register @id, or 0
 * if the register is not tracked (a WARN is raised by the lookup).
 */
u64 read_sanitised_ftr_reg(u32 id)
{
	struct arm64_ftr_reg *regp = get_arm64_ftr_reg(id);

	return regp ? regp->sys_val : 0;
}
EXPORT_SYMBOL_GPL(read_sanitised_ftr_reg);

/* Expand to one switch case that reads the named register into 'val'. */
#define read_sysreg_case(r) \
 case r:  val = read_sysreg_s(r); break;

/*
--> --------------------

--> maximum size reached

--> --------------------

Messung V0.5
C=96 H=89 G=92

¤ Dauer der Verarbeitung: 0.9 Sekunden  (vorverarbeitet)  ¤

*© Formatika GbR, Deutschland






Wurzel

Suchen

Beweissystem der NASA

Beweissystem Isabelle

NIST Cobol Testsuite

Cephes Mathematical Library

Wiener Entwicklungsmethode

Haftungshinweis

Die Informationen auf dieser Webseite wurden nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit, noch Richtigkeit, noch Qualität der bereit gestellten Informationen zugesichert.

Bemerkung:

Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.