// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology(RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */
/*
 * Update this CPU's cached default CLOSID/RMID from @info (when non-NULL)
 * and then re-apply the selection for the currently running task.
 *
 * This is safe against resctrl_arch_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
void resctrl_arch_sync_cpu_closid_rmid(void *info)
{
	struct resctrl_cpu_defaults *r = info;

	/* @info may be NULL: then only the MSR re-programming below runs */
	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_arch_sched_in(current);
}
#define INVALID_CONFIG_INDEX	UINT_MAX

/**
 * mon_event_config_index_get - get the hardware index for the
 *				configurable event
 * @evtid: event id.
 *
 * Return: 0 for evtid == QOS_L3_MBM_TOTAL_EVENT_ID
 *	   1 for evtid == QOS_L3_MBM_LOCAL_EVENT_ID
 *	   INVALID_CONFIG_INDEX for invalid evtid
 */
/* Fixed: the storage-class/type keywords were fused ("staticinlineunsignedint") */
static inline unsigned int mon_event_config_index_get(u32 evtid)
{
	switch (evtid) {
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return 0;
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return 1;
	default:
		/* Should never reach here */
		return INVALID_CONFIG_INDEX;
	}
}
	/*
	 * NOTE(review): this span is the tail of a function whose opening
	 * (signature, declarations of cpu_mask/r_l/d/cpu/level/update/enable)
	 * lies before the visible region — confirm against the full file.
	 */
	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level].r_resctrl;
	list_for_each_entry(d, &r_l->ctrl_domains, hdr.list) {
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
			for_each_cpu(cpu, &d->hdr.cpu_mask)
				cpumask_set_cpu(cpu, cpu_mask);
		else
			/* Pick one CPU from each domain instance to update MSR */
			cpumask_set_cpu(cpumask_any(&d->hdr.cpu_mask), cpu_mask);
	}

	/* Update QOS_CFG MSR on all the CPUs in cpu_mask */
	on_each_cpu_mask(cpu_mask, update, &enable, 1);

	free_cpumask_var(cpu_mask);

	return 0;
}
/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	/* Nothing to restore on resources that cannot do CDP */
	if (!r->cdp_capable)
		return;

	/* Re-apply the cached CDP enable state for this cache level */
	switch (r->rid) {
	case RDT_RESOURCE_L2:
		l2_qos_cfg_update(&hw_res->cdp_enabled);
		break;
	case RDT_RESOURCE_L3:
		l3_qos_cfg_update(&hw_res->cdp_enabled);
		break;
	default:
		break;
	}
}
	/*
	 * NOTE(review): this span is an interior fragment — the enclosing
	 * function's signature and the declarations of hw_res/hw_dom/d/i/
	 * msr_param are outside the visible region; confirm against the
	 * full file.
	 */
	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all ctrl_domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->ctrl_domains, hdr.list) {
		hw_dom = resctrl_to_arch_ctrl_dom(d);

		/* Reset every CLOSID's control value to the default */
		for (i = 0; i < hw_res->num_closid; i++)
			hw_dom->ctrl_val[i] = resctrl_get_default_ctrl(r);

		msr_param.dom = d;
		/* One CPU per domain is enough to write the shared MSRs */
		smp_call_function_any(&d->hdr.cpu_mask, rdt_ctrl_update, &msr_param, 1);
	}
Die Informationen auf dieser Webseite wurden
nach bestem Wissen sorgfältig zusammengestellt. Es wird jedoch weder Vollständigkeit noch Richtigkeit
noch Qualität der bereitgestellten Informationen zugesichert.
Bemerkung:
Die farbliche Syntaxdarstellung und die Messung sind noch experimentell.