/*
 * edac_device.c
 * (C) 2007 www.douglaskthompson.com
 *
 * This file may be distributed under the terms of the
 * GNU General Public License.
 *
 * Written by Doug Thompson <norsk5@xmission.com>
 *
 * edac_device API implementation
 * 19 Jan 2007
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/sysctl.h>
#include <linux/highmem.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/jiffies.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/sysdev.h>
#include <linux/ctype.h>
#include <linux/workqueue.h>
#include <asm/uaccess.h>
#include <asm/page.h>

#include "edac_core.h"
#include "edac_module.h"
/* lock for the list: 'edac_device_list', manipulation of this list
 * is protected by the 'device_ctls_mutex' lock
 */
static DEFINE_MUTEX(device_ctls_mutex);
static LIST_HEAD(edac_device_list);
#ifdef CONFIG_EDAC_DEBUG
static void edac_device_dump_device(struct edac_device_ctl_info *edac_dev)
{
        debugf3("\tedac_dev = %p dev_idx=%d\n", edac_dev, edac_dev->dev_idx);
        debugf4("\tedac_dev->edac_check = %p\n", edac_dev->edac_check);
        debugf3("\tdev = %p\n", edac_dev->dev);
        debugf3("\tmod_name:ctl_name = %s:%s\n",
                edac_dev->mod_name, edac_dev->ctl_name);
        debugf3("\tpvt_info = %p\n\n", edac_dev->pvt_info);
}
#endif                          /* CONFIG_EDAC_DEBUG */
/*
 * edac_device_alloc_ctl_info()
 *      Allocate a new edac device control info structure
 *
 *      The control structure is allocated as one complete chunk
 *      from the OS. It is in turn sub-allocated to the
 *      various objects that compose the structure
 *
 *      The structure has a 'nr_instance' array within itself.
 *      Each instance represents a major component
 *              Example: L1 cache and L2 cache are 2 instance components
 *
 *      Within each instance is an array of 'nr_blocks' blockoffsets
 */
struct edac_device_ctl_info *edac_device_alloc_ctl_info(
        unsigned sz_private,
        char *edac_device_name, unsigned nr_instances,
        char *edac_block_name, unsigned nr_blocks,
        unsigned offset_value,          /* zero, 1, or other based offset */
        struct edac_dev_sysfs_block_attribute *attrib_spec, unsigned nr_attrib,
        int device_index)
{
        struct edac_device_ctl_info *dev_ctl;
        struct edac_device_instance *dev_inst, *inst;
        struct edac_device_block *dev_blk, *blk_p, *blk;
        struct edac_dev_sysfs_block_attribute *dev_attrib, *attrib_p, *attrib;
        unsigned total_size;
        unsigned count;
        unsigned instance, block, attr;
        void *pvt;
        int err;

        debugf4("%s() instances=%d blocks=%d\n",
                __func__, nr_instances, nr_blocks);
        /* Calculate the size of memory we need to allocate AND
         * determine the offsets of the various item arrays
         * (instance,block,attrib) from the start of an allocated structure.
         * We want the alignment of each item (instance,block,attrib)
         * to be at least as stringent as what the compiler would
         * provide if we could simply hardcode everything into a single struct.
         */
        dev_ctl = (struct edac_device_ctl_info *)NULL;

        /* Calc the 'end' offset past end of ONE ctl_info structure
         * which will become the start of the 'instance' array
         */
        dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst));

        /* Calc the 'end' offset past the instance array within the ctl_info
         * which will become the start of the block array
         */
        dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk));

        /* Calc the 'end' offset past the dev_blk array
         * which will become the start of the attrib array, if any.
         */
        count = nr_instances * nr_blocks;
        dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib));

        /* Check for case of when an attribute array is specified */
        if (nr_attrib > 0) {
                /* calc how many nr_attrib we need */
                count *= nr_attrib;

                /* Calc the 'end' offset past the attributes array */
                pvt = edac_align_ptr(&dev_attrib[count], sz_private);
        } else {
                /* no attribute array specified */
                pvt = edac_align_ptr(dev_attrib, sz_private);
        }

        /* 'pvt' now points to where the private data area is.
         * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
         * is baselined at ZERO
         */
        total_size = ((unsigned long)pvt) + sz_private;
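
        /* At this point the planned allocation is laid out, in order, as:
         *
         *      [ ctl_info ][ nr_instances instances ]
         *      [ nr_instances * nr_blocks blocks ][ block attributes, if any ]
         *      [ sz_private bytes of private data ]
         *
         * with each region aligned by edac_align_ptr() and all offsets still
         * relative to a NULL base.
         */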
        /* Allocate the amount of memory for the set of control structures */
        dev_ctl = kzalloc(total_size, GFP_KERNEL);
        if (dev_ctl == NULL)
                return NULL;

        /* Adjust pointers so they point within the actual memory we
         * just allocated rather than an imaginary chunk of memory
         * located at address 0.
         * 'dev_ctl' points to REAL memory, while the others are
         * ZERO based and thus need to be adjusted to point within
         * the allocated memory.
         */
        dev_inst = (struct edac_device_instance *)
                (((char *)dev_ctl) + ((unsigned long)dev_inst));
        dev_blk = (struct edac_device_block *)
                (((char *)dev_ctl) + ((unsigned long)dev_blk));
        dev_attrib = (struct edac_dev_sysfs_block_attribute *)
                (((char *)dev_ctl) + ((unsigned long)dev_attrib));
        pvt = sz_private ? (((char *)dev_ctl) + ((unsigned long)pvt)) : NULL;

        /* Begin storing the information into the control info structure */
        dev_ctl->dev_idx = device_index;
        dev_ctl->nr_instances = nr_instances;
        dev_ctl->instances = dev_inst;
        dev_ctl->pvt_info = pvt;

        /* Default logging of CEs and UEs */
        dev_ctl->log_ce = 1;
        dev_ctl->log_ue = 1;

        /* Name of this edac device */
        snprintf(dev_ctl->name, sizeof(dev_ctl->name), "%s", edac_device_name);

        debugf4("%s() edac_dev=%p next after end=%p\n",
                __func__, dev_ctl, pvt + sz_private);
        /* Initialize every Instance */
        for (instance = 0; instance < nr_instances; instance++) {
                inst = &dev_inst[instance];
                inst->ctl = dev_ctl;
                inst->nr_blocks = nr_blocks;
                blk_p = &dev_blk[instance * nr_blocks];
                inst->blocks = blk_p;

                /* name of this instance */
                snprintf(inst->name, sizeof(inst->name),
                         "%s%u", edac_device_name, instance);

                /* Initialize every block in each instance */
                for (block = 0; block < nr_blocks; block++) {
                        blk = &blk_p[block];
                        blk->instance = inst;
                        snprintf(blk->name, sizeof(blk->name),
                                 "%s%d", edac_block_name, block+offset_value);

                        debugf4("%s() instance=%d inst_p=%p block=#%d "
                                "block_p=%p name='%s'\n",
                                __func__, instance, inst, block,
                                blk, blk->name);

                        /* if there are NO attributes OR no attribute pointer
                         * then continue on to next block iteration
                         */
                        if ((nr_attrib == 0) || (attrib_spec == NULL))
                                continue;

                        /* setup the attribute array for this block */
                        blk->nr_attribs = nr_attrib;
                        attrib_p = &dev_attrib[block*nr_instances*nr_attrib];
                        blk->block_attributes = attrib_p;

                        debugf4("%s() THIS BLOCK_ATTRIB=%p\n",
                                __func__, blk->block_attributes);

                        /* Initialize every user specified attribute in this
                         * block with the data the caller passed in
                         * Each block gets its own copy of pointers,
                         * and its unique 'value'
                         */
                        for (attr = 0; attr < nr_attrib; attr++) {
                                attrib = &attrib_p[attr];

                                /* populate the unique per attrib
                                 * with the code pointers and info
                                 */
                                attrib->attr = attrib_spec[attr].attr;
                                attrib->show = attrib_spec[attr].show;
                                attrib->store = attrib_spec[attr].store;

                                attrib->block = blk;    /* up link */

                                debugf4("%s() alloc-attrib=%p attrib_name='%s' "
                                        "attrib-spec=%p spec-name=%s\n",
                                        __func__, attrib, attrib->attr.name,
                                        &attrib_spec[attr],
                                        attrib_spec[attr].attr.name);
                        }
                }
        }

        /* Mark this instance as merely ALLOCATED */
        dev_ctl->op_state = OP_ALLOC;
        /*
         * Initialize the 'root' kobj for the edac_device controller
         */
        err = edac_device_register_sysfs_main_kobj(dev_ctl);
        if (err) {
                kfree(dev_ctl);
                return NULL;
        }

        /* at this point, the root kobj is valid, and in order to
         * 'free' the object, then the function:
         *      edac_device_unregister_sysfs_main_kobj() must be called
         * which will perform kobj unregistration and the actual free
         * will occur during the kobject callback operation
         */
        return dev_ctl;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_ctl_info);
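
/* Example (hypothetical caller, not part of this file): a driver monitoring
 * one device with two cache blocks and no extra per-block sysfs attributes
 * might allocate its control structure roughly like this:
 *
 *      struct edac_device_ctl_info *edac_dev;
 *
 *      edac_dev = edac_device_alloc_ctl_info(sizeof(struct my_pvt),
 *                                      "cpu", 1, "cache", 2,
 *                                      0, NULL, 0,
 *                                      edac_device_alloc_index());
 *      if (!edac_dev)
 *              return -ENOMEM;
 *
 * 'struct my_pvt' and the names/counts are made up for illustration; the
 * matching release call is edac_device_free_ctl_info(edac_dev).
 */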
/*
 * edac_device_free_ctl_info()
 *      frees the memory allocated by the edac_device_alloc_ctl_info()
 *      function
 */
void edac_device_free_ctl_info(struct edac_device_ctl_info *ctl_info)
{
        edac_device_unregister_sysfs_main_kobj(ctl_info);
}
EXPORT_SYMBOL_GPL(edac_device_free_ctl_info);
/*
 * find_edac_device_by_dev
 *      scans the edac_device list for a specific 'struct device *'
 *
 *      lock to be held prior to call: device_ctls_mutex
 *
 *      Return:
 *              pointer to control structure managing 'dev'
 *              NULL if not found on list
 */
static struct edac_device_ctl_info *find_edac_device_by_dev(struct device *dev)
{
        struct edac_device_ctl_info *edac_dev;
        struct list_head *item;

        debugf0("%s()\n", __func__);

        list_for_each(item, &edac_device_list) {
                edac_dev = list_entry(item, struct edac_device_ctl_info, link);

                if (edac_dev->dev == dev)
                        return edac_dev;
        }

        return NULL;
}
/*
 * add_edac_dev_to_global_list
 *      Before calling this function, caller must
 *      assign a unique value to edac_dev->dev_idx.
 *
 *      lock to be held prior to call: device_ctls_mutex
 *
 *      Return:
 *              0 on success
 *              1 on failure.
 */
static int add_edac_dev_to_global_list(struct edac_device_ctl_info *edac_dev)
{
        struct list_head *item, *insert_before;
        struct edac_device_ctl_info *rover;

        insert_before = &edac_device_list;

        /* Determine if already on the list */
        rover = find_edac_device_by_dev(edac_dev->dev);
        if (unlikely(rover != NULL))
                goto fail0;

        /* Insert in ascending order by 'dev_idx', so find position */
        list_for_each(item, &edac_device_list) {
                rover = list_entry(item, struct edac_device_ctl_info, link);

                if (rover->dev_idx >= edac_dev->dev_idx) {
                        if (unlikely(rover->dev_idx == edac_dev->dev_idx))
                                goto fail1;

                        insert_before = item;
                        break;
                }
        }

        list_add_tail_rcu(&edac_dev->link, insert_before);
        return 0;

fail0:
        edac_printk(KERN_WARNING, EDAC_MC,
                        "%s (%s) %s %s already assigned %d\n",
                        dev_name(rover->dev), edac_dev_name(rover),
                        rover->mod_name, rover->ctl_name, rover->dev_idx);
        return 1;

fail1:
        edac_printk(KERN_WARNING, EDAC_MC,
                        "bug in low-level driver: attempt to assign\n"
                        "    duplicate dev_idx %d in %s()\n", rover->dev_idx,
                        __func__);
        return 1;
}
/*
 * complete_edac_device_list_del
 *
 *      RCU callback, invoked after a grace period once no list walker
 *      can still hold a reference to the deleted entry
 */
static void complete_edac_device_list_del(struct rcu_head *head)
{
        struct edac_device_ctl_info *edac_dev;

        edac_dev = container_of(head, struct edac_device_ctl_info, rcu);
        INIT_LIST_HEAD(&edac_dev->link);
}
/*
 * del_edac_device_from_global_list
 *
 *      remove the entry from the global list via RCU,
 *      schedule the list-del callback,
 *      then wait for that callback to complete
 */
static void del_edac_device_from_global_list(struct edac_device_ctl_info
                                                *edac_device)
{
        list_del_rcu(&edac_device->link);
        call_rcu(&edac_device->rcu, complete_edac_device_list_del);
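
        /* rcu_barrier() below waits for the callback scheduled above (and any
         * other pending RCU callbacks) to complete, so the caller may free or
         * reuse the control structure as soon as this function returns.
         */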
        rcu_barrier();
}
/*
 * edac_device_workq_function
 *      performs the operation scheduled by a workq request
 *
 *      this workq is embedded within an edac_device_ctl_info
 *      structure, that needs to be polled for possible error events.
 *
 *      This operation is to acquire the list mutex lock
 *      (thus preventing insertion or deletion)
 *      and then call the device's poll function IFF this device is
 *      running polled and there is a poll function defined.
 */
static void edac_device_workq_function(struct work_struct *work_req)
{
        struct delayed_work *d_work = to_delayed_work(work_req);
        struct edac_device_ctl_info *edac_dev = to_edac_device_ctl_work(d_work);

        mutex_lock(&device_ctls_mutex);

        /* If we are being removed, bail out immediately */
        if (edac_dev->op_state == OP_OFFLINE) {
                mutex_unlock(&device_ctls_mutex);
                return;
        }

        /* Only poll controllers that are running polled and have a check */
        if ((edac_dev->op_state == OP_RUNNING_POLL) &&
            (edac_dev->edac_check != NULL)) {
                edac_dev->edac_check(edac_dev);
        }

        mutex_unlock(&device_ctls_mutex);

        /* Reschedule the workq for the next time period to start again
         * if the number of msec is for 1 sec, then adjust to the next
         * whole one second to save timers firing all over the period
         * between integral seconds
         */
        if (edac_dev->poll_msec == 1000)
                queue_delayed_work(edac_workqueue, &edac_dev->work,
                                round_jiffies_relative(edac_dev->delay));
        else
                queue_delayed_work(edac_workqueue, &edac_dev->work,
                                edac_dev->delay);
}
/*
 * edac_device_workq_setup
 *      initialize a workq item for this edac_device instance
 *      passing in the new delay period in msec
 */
void edac_device_workq_setup(struct edac_device_ctl_info *edac_dev,
                                unsigned msec)
{
        debugf0("%s()\n", __func__);

        /* take the arg 'msec' and set it into the control structure
         * to be used in the time period calculation,
         * then calc the number of jiffies that represents it
         */
        edac_dev->poll_msec = msec;
        edac_dev->delay = msecs_to_jiffies(msec);

        INIT_DELAYED_WORK(&edac_dev->work, edac_device_workq_function);

        /* optimize here for the 1 second case, which will be normal value, to
         * fire ON the 1 second time event. This helps reduce all sorts of
         * timers firing on sub-second basis, while they are happy
         * to fire together on the 1 second exactly
         */
        if (edac_dev->poll_msec == 1000)
                queue_delayed_work(edac_workqueue, &edac_dev->work,
                                round_jiffies_relative(edac_dev->delay));
        else
                queue_delayed_work(edac_workqueue, &edac_dev->work,
                                edac_dev->delay);
}
/*
 * edac_device_workq_teardown
 *      stop the workq processing on this edac_dev
 */
void edac_device_workq_teardown(struct edac_device_ctl_info *edac_dev)
{
        int status;

        status = cancel_delayed_work(&edac_dev->work);
        if (status == 0) {
                /* workq instance might be running, wait for it */
                flush_workqueue(edac_workqueue);
        }
}
/*
 * edac_device_reset_delay_period
 *
 *      need to stop any outstanding workq queued up at this time
 *      because we will be resetting the sleep time.
 *      Then restart the workq on the new delay
 */
void edac_device_reset_delay_period(struct edac_device_ctl_info *edac_dev,
                                        unsigned long value)
{
        /* cancel the current workq request, without the mutex lock */
        edac_device_workq_teardown(edac_dev);

        /* acquire the mutex before doing the workq setup */
        mutex_lock(&device_ctls_mutex);

        /* restart the workq request, with new delay value */
        edac_device_workq_setup(edac_dev, value);

        mutex_unlock(&device_ctls_mutex);
}
/*
 * edac_device_alloc_index: Allocate a unique device index number
 *
 * Return:
 *      allocated index number
 */
int edac_device_alloc_index(void)
{
        static atomic_t device_indexes = ATOMIC_INIT(0);

        return atomic_inc_return(&device_indexes) - 1;
}
EXPORT_SYMBOL_GPL(edac_device_alloc_index);
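
/* Typical (hypothetical) use: a driver calls this once per probed device and
 * passes the result straight to edac_device_alloc_ctl_info() as the
 * device_index argument:
 *
 *      int idx = edac_device_alloc_index();
 */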
/**
 * edac_device_add_device: Insert the 'edac_dev' structure into the
 * edac_device global list and create sysfs entries associated with the
 * edac_device structure.
 * @edac_dev: pointer to the edac_device structure to be added to the list
 *
 * Return:
 *      0       Success
 *      !0      Failure
 */
int edac_device_add_device(struct edac_device_ctl_info *edac_dev)
{
        debugf0("%s()\n", __func__);

#ifdef CONFIG_EDAC_DEBUG
        if (edac_debug_level >= 3)
                edac_device_dump_device(edac_dev);
#endif
        mutex_lock(&device_ctls_mutex);

        if (add_edac_dev_to_global_list(edac_dev))
                goto fail0;

        /* set load time so that error rate can be tracked */
        edac_dev->start_time = jiffies;

        /* create this instance's sysfs entries */
        if (edac_device_create_sysfs(edac_dev)) {
                edac_device_printk(edac_dev, KERN_WARNING,
                                        "failed to create sysfs device\n");
                goto fail1;
        }

        /* If there IS a check routine, then we are running POLLED */
        if (edac_dev->edac_check != NULL) {
                /* This instance is NOW RUNNING */
                edac_dev->op_state = OP_RUNNING_POLL;

                /*
                 * enable workq processing on this instance,
                 * default = 1000 msec
                 */
                edac_device_workq_setup(edac_dev, 1000);
        } else {
                edac_dev->op_state = OP_RUNNING_INTERRUPT;
        }

        /* Report action taken */
        edac_device_printk(edac_dev, KERN_INFO,
                                "Giving out device to module '%s' controller "
                                "'%s': DEV '%s' (%s)\n",
                                edac_dev->mod_name,
                                edac_dev->ctl_name,
                                edac_dev_name(edac_dev),
                                edac_op_state_to_string(edac_dev->op_state));

        mutex_unlock(&device_ctls_mutex);
        return 0;

fail1:
        /* Some error, so remove the entry from the list */
        del_edac_device_from_global_list(edac_dev);

fail0:
        mutex_unlock(&device_ctls_mutex);
        return 1;
}
EXPORT_SYMBOL_GPL(edac_device_add_device);
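
/* Example registration sequence (hypothetical driver probe; 'pdev' and
 * 'my_edac_check' are made-up names, everything else is from this API):
 *
 *      edac_dev->dev = &pdev->dev;
 *      edac_dev->mod_name = "my_edac_driver";
 *      edac_dev->ctl_name = "my_controller";
 *      edac_dev->edac_check = my_edac_check;   (leave NULL for interrupt mode)
 *
 *      if (edac_device_add_device(edac_dev)) {
 *              edac_device_free_ctl_info(edac_dev);
 *              return -ENODEV;
 *      }
 */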
/**
 * edac_device_del_device:
 *      Remove sysfs entries for specified edac_device structure and
 *      then remove edac_device structure from global list
 *
 * @dev:
 *      Pointer to 'struct device' representing the edac_device
 *      structure to remove.
 *
 * Return:
 *      Pointer to removed edac_device structure,
 *      OR NULL if device not found.
 */
struct edac_device_ctl_info *edac_device_del_device(struct device *dev)
{
        struct edac_device_ctl_info *edac_dev;

        debugf0("%s()\n", __func__);

        mutex_lock(&device_ctls_mutex);

        /* Find the structure on the list, if not there, then leave */
        edac_dev = find_edac_device_by_dev(dev);
        if (edac_dev == NULL) {
                mutex_unlock(&device_ctls_mutex);
                return NULL;
        }

        /* mark this instance as OFFLINE */
        edac_dev->op_state = OP_OFFLINE;

        /* deregister from global list */
        del_edac_device_from_global_list(edac_dev);

        mutex_unlock(&device_ctls_mutex);

        /* clear workq processing on this instance */
        edac_device_workq_teardown(edac_dev);

        /* Tear down the sysfs entries for this instance */
        edac_device_remove_sysfs(edac_dev);

        edac_printk(KERN_INFO, EDAC_MC,
                "Removed device %d for %s %s: DEV %s\n",
                edac_dev->dev_idx,
                edac_dev->mod_name, edac_dev->ctl_name, edac_dev_name(edac_dev));

        return edac_dev;
}
EXPORT_SYMBOL_GPL(edac_device_del_device);
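
/* Example removal sequence (hypothetical driver remove path, mirroring the
 * registration sketch above; 'pdev' is again made up):
 *
 *      edac_dev = edac_device_del_device(&pdev->dev);
 *      if (edac_dev)
 *              edac_device_free_ctl_info(edac_dev);
 */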
static inline int edac_device_get_log_ce(struct edac_device_ctl_info *edac_dev)
{
        return edac_dev->log_ce;
}

static inline int edac_device_get_log_ue(struct edac_device_ctl_info *edac_dev)
{
        return edac_dev->log_ue;
}

static inline int edac_device_get_panic_on_ue(struct edac_device_ctl_info
                                        *edac_dev)
{
        return edac_dev->panic_on_ue;
}
/*
 * edac_device_handle_ce
 *      perform a common output and handling of an 'edac_dev' CE event
 */
void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
                        int inst_nr, int block_nr, const char *msg)
{
        struct edac_device_instance *instance;
        struct edac_device_block *block = NULL;

        if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
                edac_device_printk(edac_dev, KERN_ERR,
                                "INTERNAL ERROR: 'instance' out of range "
                                "(%d >= %d)\n", inst_nr,
                                edac_dev->nr_instances);
                return;
        }

        instance = edac_dev->instances + inst_nr;

        if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
                edac_device_printk(edac_dev, KERN_ERR,
                                "INTERNAL ERROR: instance %d 'block' "
                                "out of range (%d >= %d)\n",
                                inst_nr, block_nr,
                                instance->nr_blocks);
                return;
        }

        if (instance->nr_blocks > 0) {
                block = instance->blocks + block_nr;
                block->counters.ce_count++;
        }

        /* Propagate the count up the 'totals' tree */
        instance->counters.ce_count++;
        edac_dev->counters.ce_count++;

        if (edac_device_get_log_ce(edac_dev))
                edac_device_printk(edac_dev, KERN_WARNING,
                        "CE: %s instance: %s block: %s '%s'\n",
                        edac_dev->ctl_name, instance->name,
                        block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ce);
/*
 * edac_device_handle_ue
 *      perform a common output and handling of an 'edac_dev' UE event
 */
void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
                        int inst_nr, int block_nr, const char *msg)
{
        struct edac_device_instance *instance;
        struct edac_device_block *block = NULL;

        if ((inst_nr >= edac_dev->nr_instances) || (inst_nr < 0)) {
                edac_device_printk(edac_dev, KERN_ERR,
                                "INTERNAL ERROR: 'instance' out of range "
                                "(%d >= %d)\n", inst_nr,
                                edac_dev->nr_instances);
                return;
        }

        instance = edac_dev->instances + inst_nr;

        if ((block_nr >= instance->nr_blocks) || (block_nr < 0)) {
                edac_device_printk(edac_dev, KERN_ERR,
                                "INTERNAL ERROR: instance %d 'block' "
                                "out of range (%d >= %d)\n",
                                inst_nr, block_nr,
                                instance->nr_blocks);
                return;
        }

        if (instance->nr_blocks > 0) {
                block = instance->blocks + block_nr;
                block->counters.ue_count++;
        }

        /* Propagate the count up the 'totals' tree */
        instance->counters.ue_count++;
        edac_dev->counters.ue_count++;

        if (edac_device_get_log_ue(edac_dev))
                edac_device_printk(edac_dev, KERN_EMERG,
                        "UE: %s instance: %s block: %s '%s'\n",
                        edac_dev->ctl_name, instance->name,
                        block ? block->name : "N/A", msg);

        if (edac_device_get_panic_on_ue(edac_dev))
                panic("EDAC %s: UE instance: %s block %s '%s'\n",
                        edac_dev->ctl_name, instance->name,
                        block ? block->name : "N/A", msg);
}
EXPORT_SYMBOL_GPL(edac_device_handle_ue);
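
/* Example error reporting (hypothetical poll callback; the status read and
 * the MY_* bit names are invented for illustration):
 *
 *      static void my_edac_check(struct edac_device_ctl_info *edac_dev)
 *      {
 *              u32 status = my_read_error_status();
 *
 *              if (status & MY_CE_BIT)
 *                      edac_device_handle_ce(edac_dev, 0, 0,
 *                                              "single-bit error");
 *              if (status & MY_UE_BIT)
 *                      edac_device_handle_ue(edac_dev, 0, 0,
 *                                              "double-bit error");
 *      }
 *
 * Instance 0 / block 0 assume a single-instance, single-block layout; real
 * drivers pass indices matching what they allocated.
 */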