/*
 * rdt_domain structures are kfree()d when their last CPU goes offline,
 * and allocated when the first CPU in a new domain comes online.
 * The rdt_resource's domain list is updated when this happens. Readers of
 * the domain list must either take cpus_read_lock(), or rely on an RCU
 * read-side critical section, to avoid observing concurrent modification.
 * All writers take this mutex:
 */
static DEFINE_MUTEX(domain_list_lock);

/*
 * The cached resctrl_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);

/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;
/*
 * cache_alloc_hsw_probe() - Have to probe for Intel haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R)  CPU E5-2658  v3  @  2.20GHz
 *	Intel(R) Xeon(R)  CPU E5-2648L v3  @  1.80GHz
 *	Intel(R) Xeon(R)  CPU E5-2628L v3  @  2.00GHz
 *	Intel(R) Xeon(R)  CPU E5-2618L v3  @  2.30GHz
 *	Intel(R) Xeon(R)  CPU E5-2608L v3  @  2.00GHz
 *	Intel(R) Xeon(R)  CPU E5-2658A v3  @  2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 *
 * NOTE(review): "staticinlinevoid" below is a text-extraction artifact of
 * "static inline void". The function body is also truncated at this point —
 * the tail that records the probed limits and the closing brace are missing
 * from this copy of the file; restore from the upstream source.
 */
staticinlinevoid cache_alloc_hsw_probe(void)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
	struct rdt_resource *r = &hw_res->r_resctrl;
	u64 max_cbm = BIT_ULL_MASK(20) - 1, l3_cbm_0;

	/* If the write #GP faults, this part does not support CAT. */
	if (wrmsrq_safe(MSR_IA32_L3_CBM_BASE, max_cbm))
		return;

	rdmsrq(MSR_IA32_L3_CBM_BASE, l3_cbm_0);

	/* If all the bits were set in MSR, return success */
	if (l3_cbm_0 != max_cbm)
		return;
/* * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values * exposed to user interface and the h/w understandable delay values. * * The non-linear delay values have the granularity of power of two * and also the h/w does not guarantee a curve for configured delay * values vs. actual b/w enforced. * Hence we need a mapping that is pre calibrated so the user can * express the memory b/w as a percentage value.
*/ staticinlinebool rdt_get_mb_table(struct rdt_resource *r)
{ /* * There are no Intel SKUs as of now to support non-linear delay.
*/
pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
boot_cpu_data.x86, boot_cpu_data.x86_model);
/* AMD does not use delay */
r->membw.delay_linear = false;
r->membw.arch_needs_linear = false;
/* * AMD does not use memory delay throttle model to control * the allocation like Intel does.
*/
r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
r->membw.min_bw = 0;
r->membw.bw_gran = 1;
r->alloc_capable = true;
returntrue;
}
/*
 * rdt_get_cache_alloc_cfg() - Read the cache allocation configuration for
 * resource @r — presumably from CPUID leaf 0x10 sub-leaf @idx, given the
 * cpuid_0x10_* union types below; confirm against the upstream source.
 *
 * NOTE(review): "staticvoid" is a text-extraction artifact of
 * "static void". The function body is truncated here — only the local
 * declarations survive before the next definition begins; restore from
 * the upstream source.
 */
staticvoid rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_ecx ecx;
	union cpuid_0x10_x_edx edx;
	u32 ebx, default_ctrl;
/*
 * rdt_get_cdp_config() - Record that the resource at index @level is
 * CDP (Code/Data Prioritization) capable, with CDP initially disabled.
 * (Fixed fused "staticvoid" extraction artifact.)
 */
static void rdt_get_cdp_config(int level)
{
	/*
	 * By default, CDP is disabled. CDP can be enabled by mount parameter
	 * "cdp" during resctrl file system mount time.
	 */
	rdt_resources_all[level].cdp_enabled = false;
	rdt_resources_all[level].r_resctrl.cdp_capable = true;
}
	/*
	 * NOTE(review): orphaned fragment — the enclosing function header is
	 * missing from this copy of the file. The loop writes each control
	 * value in the index range [m->low, m->high) to the corresponding
	 * hardware MSR starting at msr_base.
	 */
	for (i = m->low; i < m->high; i++)
		wrmsrq(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
}
/* * Map the memory b/w percentage value to delay values * that can be written to QOS_MSRs. * There are currently no SKUs which support non linear delay values.
*/ static u32 delay_bw_map(unsignedlong bw, struct rdt_resource *r)
{ if (r->membw.delay_linear) return MAX_MBA_BW - bw;
pr_warn_once("Non Linear delay-bw map not supported but queried\n"); return MAX_MBA_BW;
}
	/*
	 * NOTE(review): orphaned fragment — the enclosing function header is
	 * missing from this copy of the file.
	 *
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100%
	 */
	for (i = 0; i < hw_res->num_closid; i++, dc++)
		*dc = resctrl_get_default_ctrl(r);
}
/**
 * arch_domain_mbm_alloc() - Allocate arch private storage for the MBM counters
 * @num_rmid:	The size of the MBM counter array
 * @hw_dom:	The domain that owns the allocated arrays
 *
 * Only the arrays for the MBM events that are currently enabled are
 * allocated; both allocations are zero-initialized per RMID.
 *
 * Return: 0 on success, -ENOMEM on allocation failure. On failure any
 * partial allocation is freed and the pointer reset to NULL, so the
 * caller never sees a half-initialized domain.
 * (Fixed fused "staticint" extraction artifact.)
 */
static int arch_domain_mbm_alloc(u32 num_rmid, struct rdt_hw_mon_domain *hw_dom)
{
	size_t tsize;

	if (resctrl_arch_is_mbm_total_enabled()) {
		tsize = sizeof(*hw_dom->arch_mbm_total);
		hw_dom->arch_mbm_total = kcalloc(num_rmid, tsize, GFP_KERNEL);
		if (!hw_dom->arch_mbm_total)
			return -ENOMEM;
	}
	if (resctrl_arch_is_mbm_local_enabled()) {
		tsize = sizeof(*hw_dom->arch_mbm_local);
		hw_dom->arch_mbm_local = kcalloc(num_rmid, tsize, GFP_KERNEL);
		if (!hw_dom->arch_mbm_local) {
			/* Undo the total-counter allocation above. */
			kfree(hw_dom->arch_mbm_total);
			hw_dom->arch_mbm_total = NULL;
			return -ENOMEM;
		}
	}

	return 0;
}
/*
 * get_domain_id_from_scope() - Map @cpu to its domain id for @scope.
 *
 * Return: the cache instance id for cache scopes, the NUMA node id for
 * node scope, or -EINVAL for an unrecognised scope.
 * (Fixed fused "staticint" extraction artifact.)
 */
static int get_domain_id_from_scope(int cpu, enum resctrl_scope scope)
{
	switch (scope) {
	case RESCTRL_L2_CACHE:
	case RESCTRL_L3_CACHE:
		return get_cpu_cacheinfo_id(cpu, scope);
	case RESCTRL_L3_NODE:
		return cpu_to_node(cpu);
	default:
		break;
	}

	return -EINVAL;
}
/*
 * domain_add_cpu_ctrl() - Add @cpu to the control domain of resource @r,
 * allocating a new domain when @cpu is the first CPU in its scope.
 *
 * NOTE(review): "staticvoid" is a text-extraction artifact of
 * "static void". This whole region is corrupted: domain_add_cpu_ctrl()
 * is truncated right after the kzalloc_node() call, and the text then
 * continues mid-way through what appear to be the bodies of the
 * corresponding "remove CPU" functions for control and monitor domains
 * (note "d"/"hw_dom" change type part-way through). Restore from the
 * upstream source; code tokens are preserved unchanged below.
 */
staticvoid domain_add_cpu_ctrl(int cpu, struct rdt_resource *r)
{
	int id = get_domain_id_from_scope(cpu, r->ctrl_scope);
	struct rdt_hw_ctrl_domain *hw_dom;
	struct list_head *add_pos = NULL;
	struct rdt_domain_hdr *hdr;
	struct rdt_ctrl_domain *d;
	int err;

	/* Writers of the domain lists must hold domain_list_lock. */
	lockdep_assert_held(&domain_list_lock);

	if (id < 0) {
		pr_warn_once("Can't find control domain id for CPU:%d scope:%d for resource %s\n",
			     cpu, r->ctrl_scope, r->name);
		return;
	}

	/* If a domain with this id already exists, just add the CPU to it. */
	hdr = resctrl_find_domain(&r->ctrl_domains, id, &add_pos);
	if (hdr) {
		if (WARN_ON_ONCE(hdr->type != RESCTRL_CTRL_DOMAIN))
			return;
		d = container_of(hdr, struct rdt_ctrl_domain, hdr);

		cpumask_set_cpu(cpu, &d->hdr.cpu_mask);
		if (r->cache.arch_has_per_cpu_cfg)
			rdt_domain_reconfigure_cdp(r);
		return;
	}

	hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
	if (!hw_dom)
		return;

	/*
	 * NOTE(review): the remainder of domain_add_cpu_ctrl() is missing
	 * here; from this point the text resembles a control-domain
	 * "remove CPU" path.
	 */
	if (id < 0) {
		pr_warn_once("Can't find control domain id for CPU:%d scope:%d for resource %s\n",
			     cpu, r->ctrl_scope, r->name);
		return;
	}

	hdr = resctrl_find_domain(&r->ctrl_domains, id, NULL);
	if (!hdr) {
		pr_warn("Can't find control domain for id=%d for CPU %d for resource %s\n",
			id, cpu, r->name);
		return;
	}

	if (WARN_ON_ONCE(hdr->type != RESCTRL_CTRL_DOMAIN))
		return;

	d = container_of(hdr, struct rdt_ctrl_domain, hdr);
	hw_dom = resctrl_to_arch_ctrl_dom(d);

	/* Free the domain once its last CPU has gone away. */
	cpumask_clear_cpu(cpu, &d->hdr.cpu_mask);
	if (cpumask_empty(&d->hdr.cpu_mask)) {
		resctrl_offline_ctrl_domain(r, d);
		list_del_rcu(&d->hdr.list);
		synchronize_rcu();

		/*
		 * rdt_ctrl_domain "d" is going to be freed below, so clear
		 * its pointer from pseudo_lock_region struct.
		 */
		if (d->plr)
			d->plr->d = NULL;
		ctrl_domain_free(hw_dom);

	/*
	 * NOTE(review): the closing brace of the block above is missing in
	 * this copy; the text continues with a monitor-domain "remove CPU"
	 * path whose function header is also missing.
	 */
	if (id < 0) {
		pr_warn_once("Can't find monitor domain id for CPU:%d scope:%d for resource %s\n",
			     cpu, r->mon_scope, r->name);
		return;
	}

	hdr = resctrl_find_domain(&r->mon_domains, id, NULL);
	if (!hdr) {
		pr_warn("Can't find monitor domain for id=%d for CPU %d for resource %s\n",
			id, cpu, r->name);
		return;
	}

	if (WARN_ON_ONCE(hdr->type != RESCTRL_MON_DOMAIN))
		return;

	d = container_of(hdr, struct rdt_mon_domain, hdr);
	hw_dom = resctrl_to_arch_mon_dom(d);

	/* Free the monitor domain once its last CPU has gone away. */
	cpumask_clear_cpu(cpu, &d->hdr.cpu_mask);
	if (cpumask_empty(&d->hdr.cpu_mask)) {
		resctrl_offline_mon_domain(r, d);
		list_del_rcu(&d->hdr.list);
		synchronize_rcu();
		mon_domain_free(hw_dom);

		return;
	}
}
/*
 * domain_remove_cpu() - Remove @cpu from all domains of resource @r.
 *
 * Dispatches to the control and/or monitor removal paths according to
 * the capabilities the resource advertises.
 * (Fixed fused "staticvoid" extraction artifact.)
 */
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	if (r->alloc_capable)
		domain_remove_cpu_ctrl(cpu, r);
	if (r->mon_capable)
		domain_remove_cpu_mon(cpu, r);
}
	/*
	 * NOTE(review): orphaned fragment — the enclosing function header is
	 * missing from this copy of the file. This appears to be the tail of
	 * an rdt option lookup: scan rdt_options[] for "flag" and let a
	 * matching entry's force_off/force_on command-line override adjust
	 * the default result held in "ret".
	 */
	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}
bool resctrl_arch_is_evt_configurable(enum resctrl_event_id evt)
{ if (!rdt_cpu_has(X86_FEATURE_BMEC)) returnfalse;
switch (evt) { case QOS_L3_MBM_TOTAL_EVENT_ID: return rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL); case QOS_L3_MBM_LOCAL_EVENT_ID: return rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL); default: returnfalse;
}
}
	/*
	 * NOTE(review): orphaned fragment — the enclosing function header is
	 * missing from this copy of the file. This looks like the body of the
	 * allocation-capability probe: bail out unless X86_FEATURE_RDT_A is
	 * present, then detect L3/L2 cache allocation and their CDP variants.
	 * "returnfalse" is a text-extraction artifact of "return false" and
	 * must be repaired when the full function is restored.
	 */
	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		returnfalse;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
		rdt_get_cache_alloc_cfg(1, r);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are same format at 0x10.1 */
		r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
		rdt_get_cache_alloc_cfg(2, r);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}
/*
 * NOTE(review): the following text is not part of this source file. It is a
 * German web-page disclaimer accidentally captured during extraction,
 * translating roughly to: "The information on this website has been carefully
 * compiled to the best of our knowledge. However, no guarantee is given of
 * the completeness, correctness or quality of the information provided.
 * Note: the colored syntax display and the measurement are still
 * experimental." It should be removed from the file.
 */