/* * edac_device.c * (C) 2007 www.douglaskthompson.com * * This file may be distributed under the terms of the * GNU General Public License. * * Written by Doug Thompson <norsk5@xmission.com> * * edac_device API implementation * 19 Jan 2007
*/
/* lock for the list: 'edac_device_list', manipulation of this list * is protected by the 'device_ctls_mutex' lock
 */ static DEFINE_MUTEX(device_ctls_mutex); static LIST_HEAD(edac_device_list);
/* Every traversal or modification of edac_device_list in this file takes
 * device_ctls_mutex first (see edac_device_workq_function() and
 * edac_device_add_device() below).
 */
/* Default workqueue processing interval on this instance, in msecs */ #define DEFAULT_POLL_INTERVAL 1000
/* NOTE(review): the lines below are a fragment of the controller-allocation
 * path; the enclosing function definition is not visible in this chunk
 * (presumably edac_device_alloc_ctl_info() — confirm against the full file).
 * Lines appear to be missing before and after this span.
 */
/* Mark this instance as merely ALLOCATED */
dev_ctl->op_state = OP_ALLOC;
/* * Initialize the 'root' kobj for the edac_device controller
 */
/* On registration failure the partially-built object is released via the
 * 'free' label (not visible in this chunk).
 */
err = edac_device_register_sysfs_main_kobj(dev_ctl); if (err) goto free;
/* at this point, the root kobj is valid, and in order to * 'free' the object, then the function: * edac_device_unregister_sysfs_main_kobj() must be called * which will perform kobj unregistration and the actual free * will occur during the kobject callback operation
 */
/* * find_edac_device_by_dev * scans the edac_device list for a specific 'struct device *' * * lock to be held prior to call: device_ctls_mutex * * Return: * pointer to control structure managing 'dev' * NULL if not found on list
 */ staticstruct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
/* NOTE(review): "staticstruct" above is missing a space ("static struct") —
 * a syntax error, most likely an extraction artifact; restore the space.
 * The function body is truncated in this chunk: the list walk over
 * edac_device_list and the return statements are missing.
 */
{ struct edac_device_ctl_info *edac_dev; struct list_head *item;
/* * add_edac_dev_to_global_list * Before calling this function, caller must * assign a unique value to edac_dev->dev_idx. * * lock to be held prior to call: device_ctls_mutex * * Return: * 0 on success * 1 on failure.
 */ staticint add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
/* NOTE(review): "staticint" above is missing a space ("static int") —
 * syntax error, likely an extraction artifact; restore the space.
 */
{ struct list_head *item, *insert_before; struct edac_device_ctl_info *rover;
insert_before = &edac_device_list;
/* Determine if already on the list */
rover = find_edac_device_by_dev(edac_dev->dev); if (unlikely(rover != NULL)) goto fail0;
/* Insert in ascending order by 'dev_idx', so find position */
list_for_each(item, &edac_device_list) {
rover = list_entry(item, struct edac_device_ctl_info, link);
if (rover->dev_idx >= edac_dev->dev_idx) { if (unlikely(rover->dev_idx == edac_dev->dev_idx)) goto fail1;
/* NOTE(review): the remainder of add_edac_dev_to_global_list (the insert,
 * the fail0/fail1 labels and returns) is missing from this chunk. The lines
 * below appear to belong to a different helper — presumably
 * del_edac_device_from_global_list(), judging by the RCU grace-period wait
 * before re-initializing the link — whose signature is also missing.
 */
/* these are for safe removal of devices from global list while * NMI handlers may be traversing list
 */
synchronize_rcu();
INIT_LIST_HEAD(&edac_device->link);
}
/*
 * edac_device_workq_function
 *	performs the operation scheduled by a workq request
 *
 *	this workq is embedded within an edac_device_ctl_info
 *	structure, that needs to be polled for possible error events.
 *
 *	This operation is to acquire the list mutex lock
 *	(thus preventing insertation or deletion)
 *	and then call the device's poll function IFF this device is
 *	running polled and there is a poll function defined.
 */
static void edac_device_workq_function(struct work_struct *work_req)
{
	struct delayed_work *d_work = to_delayed_work(work_req);
	struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);

	mutex_lock(&device_ctls_mutex);

	/* If we are being removed, bail out immediately */
	if (edac_dev->op_state == OP_OFFLINE) {
		mutex_unlock(&device_ctls_mutex);
		return;
	}

	/* Only poll controllers that are running polled and have a check */
	if ((edac_dev->op_state == OP_RUNNING_POLL) &&
	    (edac_dev->edac_check != NULL)) {
		edac_dev->edac_check(edac_dev);
	}

	mutex_unlock(&device_ctls_mutex);

	/* Reschedule the workq for the next time period to start again
	 * if the number of msec is for 1 sec, then adjust to the next
	 * whole one second to save timers firing all over the period
	 * between integral seconds
	 */
	if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
		edac_queue_work(&edac_dev->work,
				round_jiffies_relative(edac_dev->delay));
	else
		edac_queue_work(&edac_dev->work, edac_dev->delay);
}
/* * edac_device_workq_setup * initialize a workq item for this edac_device instance * passing in the new delay period in msec
*/ staticvoid edac_device_workq_setup(struct edac_device_ctl_info *edac_dev, unsigned msec)
{
edac_dbg(0, "\n");
/* take the arg 'msec' and set it into the control structure * to used in the time period calculation * then calc the number of jiffies that represents
*/
edac_dev->poll_msec = msec;
edac_dev->delay = msecs_to_jiffies(msec);
/* optimize here for the 1 second case, which will be normal value, to * fire ON the 1 second time event. This helps reduce all sorts of * timers firing on sub-second basis, while they are happy * to fire together on the 1 second exactly
*/ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_queue_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else
edac_queue_work(&edac_dev->work, edac_dev->delay);
}
/*
 * edac_device_workq_teardown
 *	stop the workq processing on this edac_dev
 */
static void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
	/* Devices without a check routine never had polling set up */
	if (!edac_dev->edac_check)
		return;

	/* Mark offline first: a concurrently running
	 * edac_device_workq_function() checks op_state under
	 * device_ctls_mutex and bails out instead of rescheduling.
	 */
	edac_dev->op_state = OP_OFFLINE;

	edac_stop_work(&edac_dev->work);
}
/* * edac_device_reset_delay_period * * need to stop any outstanding workq queued up at this time * because we will be resetting the sleep time. * Then restart the workq on the new delay
*/ void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev, unsignedlong msec)
{
edac_dev->poll_msec = msec;
edac_dev->delay = msecs_to_jiffies(msec);
/* See comment in edac_device_workq_setup() above */ if (edac_dev->poll_msec == DEFAULT_POLL_INTERVAL)
edac_mod_work(&edac_dev->work, round_jiffies_relative(edac_dev->delay)); else
edac_mod_work(&edac_dev->work, edac_dev->delay);
}
/* edac_device_alloc_index: hand out the next device index from a
 * file-local atomic counter.
 * NOTE(review): the body is truncated in this chunk — the return statement
 * (presumably atomic_inc_return()-based; confirm against the full file) and
 * the closing brace are missing.
 */
int edac_device_alloc_index(void)
{ static atomic_t device_indexes = ATOMIC_INIT(0);
/* edac_device_add_device: insert an already-allocated edac_device control
 * structure into the global list (under device_ctls_mutex), record its load
 * time, create its sysfs entries, and mark it running-polled when a check
 * routine is present.
 * NOTE(review): the body is truncated in this chunk — the fail0/fail1 error
 * labels, the polled-workqueue setup and the success return are missing.
 * Also note the "#ifdef CONFIG_EDAC_DEBUG" line below has code fused onto
 * the directive line (a preprocessor directive consumes the whole line) —
 * an extraction artifact that must be re-split when restoring the file.
 */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
edac_dbg(0, "\n");
#ifdef CONFIG_EDAC_DEBUG if (edac_debug_level >= 3)
edac_device_dump_device(edac_dev); #endif
mutex_lock(&device_ctls_mutex);
if (add_edac_dev_to_global_list(edac_dev)) goto fail0;
/* set load time so that error rate can be tracked */
edac_dev->start_time = jiffies;
/* create this instance's sysfs entries */ if (edac_device_create_sysfs(edac_dev)) {
edac_device_printk(edac_dev, KERN_WARNING, "failed to create sysfs device\n"); goto fail1;
}
/* If there IS a check routine, then we are running POLLED */ if (edac_dev->edac_check != NULL) { /* This instance is NOW RUNNING */
edac_dev->op_state = OP_RUNNING_POLL;
/* NOTE(review): the lines below are a fragment of the device-removal path —
 * presumably edac_device_del_device(), judging by the "Removed device"
 * printk — whose signature and opening mutex_lock(&device_ctls_mutex) are
 * missing from this chunk, as are the final return and closing brace.
 */
/* Find the structure on the list, if not there, then leave */
edac_dev = find_edac_device_by_dev(dev); if (edac_dev == NULL) {
mutex_unlock(&device_ctls_mutex); return NULL;
}
/* mark this instance as OFFLINE */
edac_dev->op_state = OP_OFFLINE;
/* deregister from global list */
del_edac_device_from_global_list(edac_dev);
mutex_unlock(&device_ctls_mutex);
/* clear workq processing on this instance */
edac_device_workq_teardown(edac_dev);
/* Tear down the sysfs entries for this instance */
edac_device_remove_sysfs(edac_dev);
edac_printk(KERN_INFO, EDAC_MC, "Removed device %d for %s %s: DEV %s\n",
edac_dev->dev_idx,
edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));
/** * edac_dev_register - register device for RAS features with EDAC * @parent: parent device. * @name: name for the folder in the /sys/bus/edac/devices/, * which is derived from the parent device. * For e.g. /sys/bus/edac/devices/cxl_mem0/ * @private: parent driver's data to store in the context if any. * @num_features: number of RAS features to register. * @ras_features: list of RAS features to register. * * Return: * * %0 - Success. * * %-EINVAL - Invalid parameters passed. * * %-ENOMEM - Dynamic memory allocation failed. *
 */ int edac_dev_register(struct device *parent, char *name, void *private, int num_features, conststruct edac_dev_feature *ras_features)
/* NOTE(review): "conststruct" (here and in the declarations below) is
 * missing a space ("const struct") — syntax errors, likely extraction
 * artifacts; restore the spaces. The function body is truncated in this
 * chunk: everything after the ctx allocation (groups allocation, feature
 * instantiation loop, device registration, error unwinding) is missing.
 */
{ conststruct attribute_group **ras_attr_groups; struct edac_dev_data *dev_data; struct edac_dev_feat_ctx *ctx; int mem_repair_cnt = 0; int attr_gcnt = 0; int ret = -ENOMEM; int scrub_cnt = 0; int feat;
/* Reject missing parent/name/features up front */
if (!parent || !name || !num_features || !ras_features) return -EINVAL;
/* Double parse to make space for attributes */ for (feat = 0; feat < num_features; feat++) { switch (ras_features[feat].ft_type) { case RAS_FEAT_SCRUB:
attr_gcnt++;
scrub_cnt++; break; case RAS_FEAT_ECS:
/* ECS contributes one attribute group per media FRU */
attr_gcnt += ras_features[feat].ecs_info.num_media_frus; break; case RAS_FEAT_MEM_REPAIR:
attr_gcnt++;
mem_repair_cnt++; break; default: return -EINVAL;
}
}
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM;
/*
 * NOTE(review): the trailing text below this point in the original file is
 * not source code — it is boilerplate from the web page this file was
 * scraped from (German). Translated: "The information on this web page was
 * compiled carefully to the best of our knowledge. However, neither
 * completeness, nor correctness, nor quality of the provided information is
 * guaranteed. Remark: the colored syntax rendering and the measurement are
 * still experimental." It should be removed when restoring the file.
 */