/*
 * IBM PowerPC Virtual I/O Infrastructure Support.
 *
 * Copyright (c) 2003,2008 IBM Corp.
 *  Dave Engebretsen engebret@us.ibm.com
 *  Santiago Leon santil@us.ibm.com
 *  Hollis Blanchard <hollisb@us.ibm.com>
 *  Stephen Rothwell
 *  Robert Jennings <rcjenn@us.ibm.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/types.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/console.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/kobject.h>

#include <asm/iommu.h>
#include <asm/dma.h>
#include <asm/vio.h>
#include <asm/prom.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/abs_addr.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/iseries/vio.h>
#include <asm/iseries/hv_types.h>
#include <asm/iseries/hv_lp_config.h>
#include <asm/iseries/hv_call_xm.h>
#include <asm/iseries/iommu.h>
static struct bus_type vio_bus_type;

static struct vio_dev vio_bus_device = { /* fake "parent" device */
        .name = "vio",
        .type = "",
        .dev.init_name = "vio",
        .dev.bus = &vio_bus_type,
};
#ifdef CONFIG_PPC_SMLPAR
/**
 * vio_cmo_pool - A pool of IO memory for CMO use
 *
 * @size: The size of the pool in bytes
 * @free: The amount of free memory in the pool
 */
struct vio_cmo_pool {
        size_t size;
        size_t free;
};

/* How many ms to delay queued balance work */
#define VIO_CMO_BALANCE_DELAY 100

/* Portion out IO memory to CMO devices by this chunk size */
#define VIO_CMO_BALANCE_CHUNK 131072
/**
 * vio_cmo_dev_entry - A device that is CMO-enabled and requires entitlement
 *
 * @viodev: struct vio_dev pointer
 * @list: pointer to other devices on bus that are being tracked
 */
struct vio_cmo_dev_entry {
        struct vio_dev *viodev;
        struct list_head list;
};
/**
 * vio_cmo - VIO bus accounting structure for CMO entitlement
 *
 * @lock: spinlock for entire structure
 * @balance_q: work queue for balancing system entitlement
 * @device_list: list of CMO-enabled devices requiring entitlement
 * @entitled: total system entitlement in bytes
 * @reserve: pool of memory from which devices reserve entitlement, incl. spare
 * @excess: pool of excess entitlement not needed for device reserves or spare
 * @spare: IO memory for device hotplug functionality
 * @min: minimum necessary for system operation
 * @desired: desired memory for system operation
 * @curr: bytes currently allocated
 * @high: high water mark for IO data usage
 */
struct vio_cmo {
        spinlock_t lock;
        struct delayed_work balance_q;
        struct list_head device_list;
        size_t entitled;
        struct vio_cmo_pool reserve;
        struct vio_cmo_pool excess;
        size_t spare;
        size_t min;
        size_t desired;
        size_t curr;
        size_t high;
} vio_cmo;
/**
 * vio_cmo_num_OF_devs - Count the number of OF devices that have DMA windows
 */
static int vio_cmo_num_OF_devs(void)
{
        struct device_node *node_vroot;
        int count = 0;

        /*
         * Count the number of vdevice entries with an
         * ibm,my-dma-window OF property
         */
        node_vroot = of_find_node_by_name(NULL, "vdevice");
        if (node_vroot) {
                struct device_node *of_node;
                struct property *prop;

                for_each_child_of_node(node_vroot, of_node) {
                        prop = of_find_property(of_node, "ibm,my-dma-window",
                                                NULL);
                        if (prop)
                                count++;
                }
        }
        of_node_put(node_vroot);
        return count;
}
/**
 * vio_cmo_alloc - allocate IO memory for CMO-enabled devices
 *
 * @viodev: VIO device requesting IO memory
 * @size: size of allocation requested
 *
 * Allocations come from memory reserved for the devices and any excess
 * IO memory available to all devices.  The spare pool used to service
 * hotplug must be equal to %VIO_CMO_MIN_ENT for the excess pool to be
 * made available.
 *
 * Return codes:
 *  0 for successful allocation and -ENOMEM for a failure
 */
static inline int vio_cmo_alloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t reserve_free = 0;
        size_t excess_free = 0;
        int ret = -ENOMEM;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Determine the amount of free entitlement available in reserve */
        if (viodev->cmo.entitled > viodev->cmo.allocated)
                reserve_free = viodev->cmo.entitled - viodev->cmo.allocated;

        /* If spare is not fulfilled, the excess pool can not be used. */
        if (vio_cmo.spare >= VIO_CMO_MIN_ENT)
                excess_free = vio_cmo.excess.free;

        /* The request can be satisfied */
        if ((reserve_free + excess_free) >= size) {
                vio_cmo.curr += size;
                if (vio_cmo.curr > vio_cmo.high)
                        vio_cmo.high = vio_cmo.curr;
                viodev->cmo.allocated += size;
                size -= min(reserve_free, size);
                vio_cmo.excess.free -= size;
                ret = 0;
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return ret;
}
/**
 * vio_cmo_dealloc - deallocate IO memory from CMO-enabled devices
 * @viodev: VIO device freeing IO memory
 * @size: size of deallocation
 *
 * IO memory is freed by the device back to the correct memory pools.
 * The spare pool is replenished first from either memory pool, then
 * the reserve pool is used to reduce device entitlement, the excess
 * pool is used to increase the reserve pool toward the desired entitlement
 * target, and then the remaining memory is returned to the pools.
 */
static inline void vio_cmo_dealloc(struct vio_dev *viodev, size_t size)
{
        unsigned long flags;
        size_t spare_needed = 0;
        size_t excess_freed = 0;
        size_t reserve_freed = size;
        size_t tmp;
        int balance = 0;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.curr -= size;

        /* Amount of memory freed from the excess pool */
        if (viodev->cmo.allocated > viodev->cmo.entitled) {
                excess_freed = min(reserve_freed, (viodev->cmo.allocated -
                                                   viodev->cmo.entitled));
                reserve_freed -= excess_freed;
        }

        /* Remove allocation from device */
        viodev->cmo.allocated -= (reserve_freed + excess_freed);

        /* Spare is a subset of the reserve pool, replenish it first. */
        spare_needed = VIO_CMO_MIN_ENT - vio_cmo.spare;

        /*
         * Replenish the spare in the reserve pool from the excess pool.
         * This moves entitlement into the reserve pool.
         */
        if (spare_needed && excess_freed) {
                tmp = min(excess_freed, spare_needed);
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                vio_cmo.spare += tmp;
                excess_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Replenish the spare in the reserve pool from the reserve pool.
         * This removes entitlement from the device down to VIO_CMO_MIN_ENT,
         * if needed, and gives it to the spare pool. The amount of used
         * memory in this pool does not change.
         */
        if (spare_needed && reserve_freed) {
                tmp = min(spare_needed, min(reserve_freed,
                                            (viodev->cmo.entitled -
                                             VIO_CMO_MIN_ENT)));

                vio_cmo.spare += tmp;
                viodev->cmo.entitled -= tmp;
                reserve_freed -= tmp;
                spare_needed -= tmp;
                balance = 1;
        }

        /*
         * Increase the reserve pool until the desired allocation is met.
         * Move an allocation freed from the excess pool into the reserve
         * pool and schedule a balance operation.
         */
        if (excess_freed && (vio_cmo.desired > vio_cmo.reserve.size)) {
                tmp = min(excess_freed, (vio_cmo.desired - vio_cmo.reserve.size));

                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;
                excess_freed -= tmp;
                balance = 1;
        }

        /* Return memory from the excess pool to that pool */
        if (excess_freed)
                vio_cmo.excess.free += excess_freed;

        if (balance)
                schedule_delayed_work(&vio_cmo.balance_q, VIO_CMO_BALANCE_DELAY);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
/**
 * vio_cmo_entitlement_update - Manage system entitlement changes
 *
 * @new_entitlement: new system entitlement to attempt to accommodate
 *
 * Increases in entitlement will be used to fulfill the spare entitlement
 * and the rest is given to the excess pool.  Decreases, if they are
 * possible, come from the excess pool and from unused device entitlement.
 *
 * Returns: 0 on success, -ENOMEM when change can not be made
 */
int vio_cmo_entitlement_update(size_t new_entitlement)
{
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail, delta, tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Entitlement increases */
        if (new_entitlement > vio_cmo.entitled) {
                delta = new_entitlement - vio_cmo.entitled;

                /* Fulfill spare allocation */
                if (vio_cmo.spare < VIO_CMO_MIN_ENT) {
                        tmp = min(delta, (VIO_CMO_MIN_ENT - vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        vio_cmo.reserve.size += tmp;
                        delta -= tmp;
                }

                /* Remaining new allocation goes to the excess pool */
                vio_cmo.entitled += delta;
                vio_cmo.excess.size += delta;
                vio_cmo.excess.free += delta;

                goto out;
        }

        /* Entitlement decreases */
        delta = vio_cmo.entitled - new_entitlement;
        avail = vio_cmo.excess.free;

        /*
         * Need to check how much unused entitlement each device can
         * sacrifice to fulfill entitlement change.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                if (avail >= delta)
                        break;

                viodev = dev_ent->viodev;
                if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                    (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                        avail += viodev->cmo.entitled -
                                 max_t(size_t, viodev->cmo.allocated,
                                       VIO_CMO_MIN_ENT);
        }

        if (delta <= avail) {
                vio_cmo.entitled -= delta;

                /* Take entitlement from the excess pool first */
                tmp = min(vio_cmo.excess.free, delta);
                vio_cmo.excess.size -= tmp;
                vio_cmo.excess.free -= tmp;
                delta -= tmp;

                /*
                 * Remove all but VIO_CMO_MIN_ENT bytes from devices
                 * until entitlement change is served
                 */
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        if (!delta)
                                break;

                        viodev = dev_ent->viodev;
                        tmp = 0;
                        if ((viodev->cmo.entitled > viodev->cmo.allocated) &&
                            (viodev->cmo.entitled > VIO_CMO_MIN_ENT))
                                tmp = viodev->cmo.entitled -
                                      max_t(size_t, viodev->cmo.allocated,
                                            VIO_CMO_MIN_ENT);
                        viodev->cmo.entitled -= min(tmp, delta);
                        delta -= min(tmp, delta);
                }
        } else {
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return -ENOMEM;
        }

out:
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}
/**
 * vio_cmo_balance - Balance entitlement among devices
 *
 * @work: work queue structure for this operation
 *
 * Any system entitlement above the minimum needed for devices, or
 * already allocated to devices, can be distributed to the devices.
 * The list of devices is iterated through to recalculate the desired
 * entitlement level and to determine how much entitlement above the
 * minimum entitlement is allocated to devices.
 *
 * Small chunks of the available entitlement are given to devices until
 * their requirements are fulfilled or there is no entitlement left to give.
 * Upon completion sizes of the reserve and excess pools are calculated.
 *
 * The system minimum entitlement level is also recalculated here.
 * Entitlement will be reserved for devices even after vio_bus_remove to
 * accommodate reloading the driver.  The OF tree is walked to count the
 * number of devices present and this will remove entitlement for devices
 * that have actually left the system after having vio_bus_remove called.
 */
static void vio_cmo_balance(struct work_struct *work)
{
        struct vio_cmo *cmo;
        struct vio_dev *viodev;
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t avail = 0, level, chunk, need;
        int devcount = 0, fulfilled;

        cmo = container_of(work, struct vio_cmo, balance_q.work);

        spin_lock_irqsave(&vio_cmo.lock, flags);

        /* Calculate minimum entitlement and fulfill spare */
        cmo->min = vio_cmo_num_OF_devs() * VIO_CMO_MIN_ENT;
        BUG_ON(cmo->min > cmo->entitled);
        cmo->spare = min_t(size_t, VIO_CMO_MIN_ENT, (cmo->entitled - cmo->min));
        cmo->min += cmo->spare;
        cmo->desired = cmo->min;

        /*
         * Determine how much entitlement is available and reset device
         * entitlements
         */
        avail = cmo->entitled - cmo->spare;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                devcount++;
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                cmo->desired += (viodev->cmo.desired - VIO_CMO_MIN_ENT);
                avail -= max_t(size_t, viodev->cmo.allocated, VIO_CMO_MIN_ENT);
        }

        /*
         * Having provided each device with the minimum entitlement, loop
         * over the devices portioning out the remaining entitlement
         * until there is nothing left.
         */
        level = VIO_CMO_MIN_ENT;
        while (avail) {
                fulfilled = 0;
                list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                        viodev = dev_ent->viodev;

                        if (viodev->cmo.desired <= level) {
                                fulfilled++;
                                continue;
                        }

                        /*
                         * Give the device up to VIO_CMO_BALANCE_CHUNK
                         * bytes of entitlement, but do not exceed the
                         * desired level of entitlement for the device.
                         */
                        chunk = min_t(size_t, avail, VIO_CMO_BALANCE_CHUNK);
                        chunk = min(chunk, (viodev->cmo.desired -
                                            viodev->cmo.entitled));
                        viodev->cmo.entitled += chunk;

                        /*
                         * If the memory for this entitlement increase was
                         * already allocated to the device it does not come
                         * from the available pool being portioned out.
                         */
                        need = max(viodev->cmo.allocated, viodev->cmo.entitled) -
                               max(viodev->cmo.allocated, level);
                        avail -= need;
                }
                if (fulfilled == devcount)
                        break;
                level += VIO_CMO_BALANCE_CHUNK;
        }

        /* Calculate new reserve and excess pool sizes */
        cmo->reserve.size = cmo->min;
        cmo->excess.free = 0;
        cmo->excess.size = 0;
        need = 0;
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list) {
                viodev = dev_ent->viodev;
                /* Calculated reserve size above the minimum entitlement */
                if (viodev->cmo.entitled)
                        cmo->reserve.size += (viodev->cmo.entitled -
                                              VIO_CMO_MIN_ENT);
                /* Calculated used excess entitlement */
                if (viodev->cmo.allocated > viodev->cmo.entitled)
                        need += viodev->cmo.allocated - viodev->cmo.entitled;
        }
        cmo->excess.size = cmo->entitled - cmo->reserve.size;
        cmo->excess.free = cmo->excess.size - need;

        cancel_delayed_work(to_delayed_work(work));
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
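
/*
 * Worked example of the balancing loop above (illustrative numbers, not
 * taken from the code): with 1 MB of distributable entitlement and two
 * devices whose drivers desire 256 KB and 768 KB beyond the minimum, each
 * pass raises "level" by VIO_CMO_BALANCE_CHUNK (128 KB) and hands every
 * unfulfilled device up to one chunk.  The smaller device is satisfied
 * after two passes; the remaining passes feed the larger device until
 * "avail" is exhausted or every device reports itself fulfilled.
 */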
/*
 * DMA wrappers that account for CMO entitlement before handing off to the
 * standard iommu dma ops; a failed accounting step or a failed mapping
 * bumps the device's allocs_failed counter.
 */
static void *vio_dma_iommu_alloc_coherent(struct device *dev, size_t size,
                                          dma_addr_t *dma_handle, gfp_t flag)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        void *ret;

        if (vio_cmo_alloc(viodev, roundup(size, PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return NULL;
        }

        ret = dma_iommu_ops.alloc_coherent(dev, size, dma_handle, flag);
        if (unlikely(ret == NULL)) {
                vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}
static void vio_dma_iommu_free_coherent(struct device *dev, size_t size,
                                        void *vaddr, dma_addr_t dma_handle)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        dma_iommu_ops.free_coherent(dev, size, vaddr, dma_handle);

        vio_cmo_dealloc(viodev, roundup(size, PAGE_SIZE));
}
static dma_addr_t vio_dma_iommu_map_page(struct device *dev, struct page *page,
                                         unsigned long offset, size_t size,
                                         enum dma_data_direction direction,
                                         struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        dma_addr_t ret = DMA_ERROR_CODE;

        if (vio_cmo_alloc(viodev, roundup(size, IOMMU_PAGE_SIZE))) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        ret = dma_iommu_ops.map_page(dev, page, offset, size, direction, attrs);
        if (unlikely(dma_mapping_error(dev, ret))) {
                vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
                atomic_inc(&viodev->cmo.allocs_failed);
        }

        return ret;
}
static void vio_dma_iommu_unmap_page(struct device *dev, dma_addr_t dma_handle,
                                     size_t size,
                                     enum dma_data_direction direction,
                                     struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);

        dma_iommu_ops.unmap_page(dev, dma_handle, size, direction, attrs);

        vio_cmo_dealloc(viodev, roundup(size, IOMMU_PAGE_SIZE));
}
static int vio_dma_iommu_map_sg(struct device *dev, struct scatterlist *sglist,
                                int nelems, enum dma_data_direction direction,
                                struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct scatterlist *sgl;
        int ret, count = 0;
        size_t alloc_size = 0;

        for (sgl = sglist; count < nelems; count++, sgl++)
                alloc_size += roundup(sgl->length, IOMMU_PAGE_SIZE);

        if (vio_cmo_alloc(viodev, alloc_size)) {
                atomic_inc(&viodev->cmo.allocs_failed);
                return 0;
        }

        ret = dma_iommu_ops.map_sg(dev, sglist, nelems, direction, attrs);

        if (unlikely(!ret)) {
                vio_cmo_dealloc(viodev, alloc_size);
                atomic_inc(&viodev->cmo.allocs_failed);
                return ret;
        }

        for (sgl = sglist, count = 0; count < ret; count++, sgl++)
                alloc_size -= roundup(sgl->dma_length, IOMMU_PAGE_SIZE);
        if (alloc_size)
                vio_cmo_dealloc(viodev, alloc_size);

        return ret;
}
static void vio_dma_iommu_unmap_sg(struct device *dev,
                                   struct scatterlist *sglist, int nelems,
                                   enum dma_data_direction direction,
                                   struct dma_attrs *attrs)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct scatterlist *sgl;
        size_t alloc_size = 0;
        int count = 0;

        for (sgl = sglist; count < nelems; count++, sgl++)
                alloc_size += roundup(sgl->dma_length, IOMMU_PAGE_SIZE);

        dma_iommu_ops.unmap_sg(dev, sglist, nelems, direction, attrs);

        vio_cmo_dealloc(viodev, alloc_size);
}
struct dma_map_ops vio_dma_mapping_ops = {
        .alloc_coherent = vio_dma_iommu_alloc_coherent,
        .free_coherent  = vio_dma_iommu_free_coherent,
        .map_sg         = vio_dma_iommu_map_sg,
        .unmap_sg       = vio_dma_iommu_unmap_sg,
        .map_page       = vio_dma_iommu_map_page,
        .unmap_page     = vio_dma_iommu_unmap_page,
};
/**
 * vio_cmo_set_dev_desired - Set desired entitlement for a device
 *
 * @viodev: struct vio_dev for device to alter
 * @desired: new desired entitlement level in bytes
 *
 * For use by devices to request a change to their entitlement at runtime or
 * through sysfs.  The desired entitlement level is changed and a balancing
 * of system resources is scheduled to run in the future.
 */
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired)
{
        unsigned long flags;
        struct vio_cmo_dev_entry *dev_ent;
        int found = 0;

        if (!firmware_has_feature(FW_FEATURE_CMO))
                return;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (desired < VIO_CMO_MIN_ENT)
                desired = VIO_CMO_MIN_ENT;

        /*
         * Changes will not be made for devices not in the device list.
         * If it is not in the device list, then no driver is loaded
         * for the device and it can not receive entitlement.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        found = 1;
                        break;
                }
        if (!found) {
                /* Must drop the lock before returning here. */
                spin_unlock_irqrestore(&vio_cmo.lock, flags);
                return;
        }

        /* Increase/decrease in desired device entitlement */
        if (desired >= viodev->cmo.desired) {
                /* Just bump the bus and device values prior to a balance */
                vio_cmo.desired += desired - viodev->cmo.desired;
                viodev->cmo.desired = desired;
        } else {
                /* Decrease bus and device values for desired entitlement */
                vio_cmo.desired -= viodev->cmo.desired - desired;
                viodev->cmo.desired = desired;
                /*
                 * If less entitlement is desired than current entitlement,
                 * move any reserve memory in the change region to the excess
                 * pool.
                 */
                if (viodev->cmo.entitled > desired) {
                        vio_cmo.reserve.size -= viodev->cmo.entitled - desired;
                        vio_cmo.excess.size += viodev->cmo.entitled - desired;
                        /*
                         * If entitlement moving from the reserve pool to the
                         * excess pool is currently unused, add to the excess
                         * free counter.
                         */
                        if (viodev->cmo.allocated < viodev->cmo.entitled)
                                vio_cmo.excess.free += viodev->cmo.entitled -
                                        max(viodev->cmo.allocated, desired);
                        viodev->cmo.entitled = desired;
                }
        }
        schedule_delayed_work(&vio_cmo.balance_q, 0);
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
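
/*
 * Usage sketch (illustrative, paths assume the standard sysfs mount): the
 * desired entitlement for a device can be tuned from user space through
 * the cmo_desired attribute created further below.  Device directory names
 * are the unit address in hex; "30000002" here is a made-up example:
 *
 *      echo 1048576 > /sys/bus/vio/devices/30000002/cmo_desired
 *
 * which lands in viodev_cmo_desired_set(), calls this function, and
 * schedules a balance operation.
 */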
/**
 * vio_cmo_bus_probe - Handle CMO specific bus probe activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Determine the device's IO memory entitlement needs, attempting
 * to satisfy the system minimum entitlement at first and scheduling
 * a balance operation to take care of the rest at a later time.
 *
 * Returns: 0 on success, -EINVAL when device doesn't support CMO, and
 *          -ENOMEM when entitlement is not available for device or
 *          device entry.
 */
static int vio_cmo_bus_probe(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        struct device *dev = &viodev->dev;
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        unsigned long flags;
        size_t size;

        /*
         * Check to see that device has a DMA window and configure
         * entitlement for the device.
         */
        if (of_get_property(viodev->dev.archdata.of_node,
                            "ibm,my-dma-window", NULL)) {
                /* Check that the driver is CMO enabled and get desired DMA */
                if (!viodrv->get_desired_dma) {
                        dev_err(dev, "%s: device driver does not support CMO\n",
                                __func__);
                        return -EINVAL;
                }

                viodev->cmo.desired = IOMMU_PAGE_ALIGN(viodrv->get_desired_dma(viodev));
                if (viodev->cmo.desired < VIO_CMO_MIN_ENT)
                        viodev->cmo.desired = VIO_CMO_MIN_ENT;
                size = VIO_CMO_MIN_ENT;

                dev_ent = kmalloc(sizeof(struct vio_cmo_dev_entry),
                                  GFP_KERNEL);
                if (!dev_ent)
                        return -ENOMEM;

                dev_ent->viodev = viodev;
                spin_lock_irqsave(&vio_cmo.lock, flags);
                list_add(&dev_ent->list, &vio_cmo.device_list);
        } else {
                viodev->cmo.desired = 0;
                size = 0;
                spin_lock_irqsave(&vio_cmo.lock, flags);
        }

        /*
         * If the needs for vio_cmo.min have not changed since they
         * were last set, the number of devices in the OF tree has
         * been constant and the IO memory for this is already in
         * the reserve pool.
         */
        if (vio_cmo.min == ((vio_cmo_num_OF_devs() + 1) *
                            VIO_CMO_MIN_ENT)) {
                /* Updated desired entitlement if device requires it */
                if (size)
                        vio_cmo.desired += (viodev->cmo.desired -
                                            VIO_CMO_MIN_ENT);
        } else {
                size_t tmp;

                tmp = vio_cmo.spare + vio_cmo.excess.free;
                if (tmp < size) {
                        dev_err(dev, "%s: insufficient free "
                                "entitlement to add device. "
                                "Need %lu, have %lu\n", __func__,
                                size, (vio_cmo.spare + tmp));
                        spin_unlock_irqrestore(&vio_cmo.lock, flags);
                        return -ENOMEM;
                }

                /* Use excess pool first to fulfill request */
                tmp = min(size, vio_cmo.excess.free);
                vio_cmo.excess.free -= tmp;
                vio_cmo.excess.size -= tmp;
                vio_cmo.reserve.size += tmp;

                /* Use spare if excess pool was insufficient */
                vio_cmo.spare -= size - tmp;

                /* Update bus accounting */
                vio_cmo.min += size;
                vio_cmo.desired += viodev->cmo.desired;
        }
        spin_unlock_irqrestore(&vio_cmo.lock, flags);
        return 0;
}
/**
 * vio_cmo_bus_remove - Handle CMO specific bus removal activities
 *
 * @viodev - Pointer to struct vio_dev for device
 *
 * Remove the device from the cmo device list.  The minimum entitlement
 * will be reserved for the device as long as it is in the system.  The
 * rest of the entitlement the device had been allocated will be returned
 * to the system.
 */
static void vio_cmo_bus_remove(struct vio_dev *viodev)
{
        struct vio_cmo_dev_entry *dev_ent;
        unsigned long flags;
        size_t tmp;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        if (viodev->cmo.allocated) {
                dev_err(&viodev->dev, "%s: device had %lu bytes of IO "
                        "allocated after remove operation.\n",
                        __func__, viodev->cmo.allocated);
                BUG();
        }

        /*
         * Remove the device from the device list being maintained for
         * CMO enabled devices.
         */
        list_for_each_entry(dev_ent, &vio_cmo.device_list, list)
                if (viodev == dev_ent->viodev) {
                        list_del(&dev_ent->list);
                        kfree(dev_ent);
                        break;
                }

        /*
         * Devices may not require any entitlement and they do not need
         * to be processed.  Otherwise, return the device's entitlement
         * back to the pools.
         */
        if (viodev->cmo.entitled) {
                /*
                 * This device has not yet left the OF tree, its
                 * minimum entitlement remains in vio_cmo.min and
                 * vio_cmo.desired
                 */
                vio_cmo.desired -= (viodev->cmo.desired - VIO_CMO_MIN_ENT);

                /*
                 * Save min allocation for device in reserve as long
                 * as it exists in OF tree as determined by later
                 * balance operation
                 */
                viodev->cmo.entitled -= VIO_CMO_MIN_ENT;

                /* Replenish spare from freed reserve pool */
                if (viodev->cmo.entitled && (vio_cmo.spare < VIO_CMO_MIN_ENT)) {
                        tmp = min(viodev->cmo.entitled, (VIO_CMO_MIN_ENT -
                                                         vio_cmo.spare));
                        vio_cmo.spare += tmp;
                        viodev->cmo.entitled -= tmp;
                }

                /* Remaining reserve goes to excess pool */
                vio_cmo.excess.size += viodev->cmo.entitled;
                vio_cmo.excess.free += viodev->cmo.entitled;
                vio_cmo.reserve.size -= viodev->cmo.entitled;

                /*
                 * Until the device is removed it will keep a
                 * minimum entitlement; this will guarantee that
                 * a module unload/load will result in a success.
                 */
                viodev->cmo.entitled = VIO_CMO_MIN_ENT;
                viodev->cmo.desired = VIO_CMO_MIN_ENT;
                atomic_set(&viodev->cmo.allocs_failed, 0);
        }

        spin_unlock_irqrestore(&vio_cmo.lock, flags);
}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
{
        vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
        viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
}
/**
 * vio_cmo_bus_init - CMO entitlement initialization at bus init time
 *
 * Set up the reserve and excess entitlement pools based on available
 * system entitlement and the number of devices in the OF tree that
 * require entitlement in the reserve pool.
 */
static void vio_cmo_bus_init(void)
{
        struct hvcall_mpp_data mpp_data;
        int err;

        memset(&vio_cmo, 0, sizeof(struct vio_cmo));
        spin_lock_init(&vio_cmo.lock);
        INIT_LIST_HEAD(&vio_cmo.device_list);
        INIT_DELAYED_WORK(&vio_cmo.balance_q, vio_cmo_balance);

        /* Get current system entitlement */
        err = h_get_mpp(&mpp_data);

        /*
         * On failure, continue with entitlement set to 0, will panic()
         * later when spare is reserved.
         */
        if (err != H_SUCCESS) {
                printk(KERN_ERR "%s: unable to determine system IO "
                       "entitlement. (%d)\n", __func__, err);
                vio_cmo.entitled = 0;
        } else {
                vio_cmo.entitled = mpp_data.entitled_mem;
        }

        /* Set reservation and check against entitlement */
        vio_cmo.spare = VIO_CMO_MIN_ENT;
        vio_cmo.reserve.size = vio_cmo.spare;
        vio_cmo.reserve.size += (vio_cmo_num_OF_devs() *
                                 VIO_CMO_MIN_ENT);
        if (vio_cmo.reserve.size > vio_cmo.entitled) {
                printk(KERN_ERR "%s: insufficient system entitlement\n",
                       __func__);
                panic("%s: Insufficient system entitlement", __func__);
        }

        /* Set the remaining accounting variables */
        vio_cmo.excess.size = vio_cmo.entitled - vio_cmo.reserve.size;
        vio_cmo.excess.free = vio_cmo.excess.size;
        vio_cmo.min = vio_cmo.reserve.size;
        vio_cmo.desired = vio_cmo.reserve.size;
}
/* sysfs device functions and data structures for CMO */

#define viodev_cmo_rd_attr(name)                                        \
static ssize_t viodev_cmo_##name##_show(struct device *dev,             \
                                        struct device_attribute *attr,  \
                                        char *buf)                      \
{                                                                       \
        return sprintf(buf, "%lu\n", to_vio_dev(dev)->cmo.name);        \
}

static ssize_t viodev_cmo_allocs_failed_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        return sprintf(buf, "%d\n", atomic_read(&viodev->cmo.allocs_failed));
}

static ssize_t viodev_cmo_allocs_failed_reset(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        atomic_set(&viodev->cmo.allocs_failed, 0);
        return count;
}

static ssize_t viodev_cmo_desired_set(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t count)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        size_t new_desired;
        int ret;

        ret = strict_strtoul(buf, 10, &new_desired);
        if (ret)
                return ret;

        vio_cmo_set_dev_desired(viodev, new_desired);
        return count;
}

viodev_cmo_rd_attr(desired);
viodev_cmo_rd_attr(entitled);
viodev_cmo_rd_attr(allocated);

static ssize_t name_show(struct device *, struct device_attribute *, char *);
static ssize_t devspec_show(struct device *, struct device_attribute *, char *);
static struct device_attribute vio_cmo_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
        __ATTR(cmo_desired,       S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_desired_show, viodev_cmo_desired_set),
        __ATTR(cmo_entitled,      S_IRUGO, viodev_cmo_entitled_show,  NULL),
        __ATTR(cmo_allocated,     S_IRUGO, viodev_cmo_allocated_show, NULL),
        __ATTR(cmo_allocs_failed, S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viodev_cmo_allocs_failed_show, viodev_cmo_allocs_failed_reset),
        __ATTR_NULL
};
/* sysfs bus functions and data structures for CMO */

#define viobus_cmo_rd_attr(name)                                        \
static ssize_t                                                          \
viobus_cmo_##name##_show(struct bus_type *bt, char *buf)                \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name);                     \
}

#define viobus_cmo_pool_rd_attr(name, var)                              \
static ssize_t                                                          \
viobus_cmo_##name##_pool_show_##var(struct bus_type *bt, char *buf)     \
{                                                                       \
        return sprintf(buf, "%lu\n", vio_cmo.name.var);                 \
}

static ssize_t viobus_cmo_high_reset(struct bus_type *bt, const char *buf,
                                     size_t count)
{
        unsigned long flags;

        spin_lock_irqsave(&vio_cmo.lock, flags);
        vio_cmo.high = vio_cmo.curr;
        spin_unlock_irqrestore(&vio_cmo.lock, flags);

        return count;
}

viobus_cmo_rd_attr(entitled);
viobus_cmo_pool_rd_attr(reserve, size);
viobus_cmo_pool_rd_attr(excess, size);
viobus_cmo_pool_rd_attr(excess, free);
viobus_cmo_rd_attr(spare);
viobus_cmo_rd_attr(min);
viobus_cmo_rd_attr(desired);
viobus_cmo_rd_attr(curr);
viobus_cmo_rd_attr(high);

static struct bus_attribute vio_cmo_bus_attrs[] = {
        __ATTR(cmo_entitled, S_IRUGO, viobus_cmo_entitled_show, NULL),
        __ATTR(cmo_reserve_size, S_IRUGO, viobus_cmo_reserve_pool_show_size, NULL),
        __ATTR(cmo_excess_size, S_IRUGO, viobus_cmo_excess_pool_show_size, NULL),
        __ATTR(cmo_excess_free, S_IRUGO, viobus_cmo_excess_pool_show_free, NULL),
        __ATTR(cmo_spare,   S_IRUGO, viobus_cmo_spare_show,   NULL),
        __ATTR(cmo_min,     S_IRUGO, viobus_cmo_min_show,     NULL),
        __ATTR(cmo_desired, S_IRUGO, viobus_cmo_desired_show, NULL),
        __ATTR(cmo_curr,    S_IRUGO, viobus_cmo_curr_show,    NULL),
        __ATTR(cmo_high,    S_IWUSR|S_IRUSR|S_IWGRP|S_IRGRP|S_IROTH,
               viobus_cmo_high_show, viobus_cmo_high_reset),
        __ATTR_NULL
};

static void vio_cmo_sysfs_init(void)
{
        vio_bus_type.dev_attrs = vio_cmo_dev_attrs;
        vio_bus_type.bus_attrs = vio_cmo_bus_attrs;
}
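
/*
 * Resulting sysfs layout (illustrative, assuming the standard sysfs
 * mount): the bus attributes above appear as /sys/bus/vio/cmo_entitled,
 * /sys/bus/vio/cmo_reserve_size, and so on, while the per-device
 * attributes show up as /sys/bus/vio/devices/<unit-address>/cmo_desired
 * etc.  Writing any value to cmo_high resets the high water mark to the
 * current usage, as viobus_cmo_high_reset() above ignores the buffer.
 */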
#else /* CONFIG_PPC_SMLPAR */
/* Dummy functions for iSeries platform */
int vio_cmo_entitlement_update(size_t new_entitlement) { return 0; }
void vio_cmo_set_dev_desired(struct vio_dev *viodev, size_t desired) {}
static int vio_cmo_bus_probe(struct vio_dev *viodev) { return 0; }
static void vio_cmo_bus_remove(struct vio_dev *viodev) {}
static void vio_cmo_set_dma_ops(struct vio_dev *viodev) {}
static void vio_cmo_bus_init(void) {}
static void vio_cmo_sysfs_init(void) {}
#endif /* CONFIG_PPC_SMLPAR */
EXPORT_SYMBOL(vio_cmo_entitlement_update);
EXPORT_SYMBOL(vio_cmo_set_dev_desired);
static struct iommu_table *vio_build_iommu_table(struct vio_dev *dev)
{
        const unsigned char *dma_window;
        struct iommu_table *tbl;
        unsigned long offset, size;

        if (firmware_has_feature(FW_FEATURE_ISERIES))
                return vio_build_iommu_table_iseries(dev);

        dma_window = of_get_property(dev->dev.archdata.of_node,
                                     "ibm,my-dma-window", NULL);
        if (!dma_window)
                return NULL;

        tbl = kmalloc(sizeof(*tbl), GFP_KERNEL);
        if (tbl == NULL)
                return NULL;

        of_parse_dma_window(dev->dev.archdata.of_node, dma_window,
                            &tbl->it_index, &offset, &size);

        /* TCE table size - measured in tce entries */
        tbl->it_size = size >> IOMMU_PAGE_SHIFT;
        /* offset for VIO should always be 0 */
        tbl->it_offset = offset >> IOMMU_PAGE_SHIFT;
        tbl->it_busno = 0;
        tbl->it_type = TCE_VB;

        return iommu_init_table(tbl, -1);
}
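
/*
 * Sizing example (illustrative): with the usual 4 KB IOMMU pages
 * (IOMMU_PAGE_SHIFT == 12), a 256 MB "ibm,my-dma-window" yields
 * it_size = 0x10000000 >> 12 = 65536 TCE entries.
 */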
/**
 * vio_match_device: - Tell if a VIO device has a matching
 *                     VIO device id structure.
 * @ids:        array of VIO device id structures to search in
 * @dev:        the VIO device structure to match against
 *
 * Used by a driver to check whether a VIO device present in the
 * system is in its list of supported devices. Returns the matching
 * vio_device_id structure or NULL if there is no match.
 */
static const struct vio_device_id *vio_match_device(
                const struct vio_device_id *ids, const struct vio_dev *dev)
{
        while (ids->type[0] != '\0') {
                if ((strncmp(dev->type, ids->type, strlen(ids->type)) == 0) &&
                    of_device_is_compatible(dev->dev.archdata.of_node,
                                            ids->compat))
                        return ids;
                ids++;
        }
        return NULL;
}
/*
 * Convert from struct device to struct vio_dev and pass to driver.
 * dev->driver has already been set by generic code because vio_bus_match
 * succeeded.
 */
static int vio_bus_probe(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        const struct vio_device_id *id;
        int error = -ENODEV;

        if (!viodrv->probe)
                return error;

        id = vio_match_device(viodrv->id_table, viodev);
        if (id) {
                memset(&viodev->cmo, 0, sizeof(viodev->cmo));
                if (firmware_has_feature(FW_FEATURE_CMO)) {
                        error = vio_cmo_bus_probe(viodev);
                        if (error)
                                return error;
                }
                error = viodrv->probe(viodev, id);
                if (error && firmware_has_feature(FW_FEATURE_CMO))
                        vio_cmo_bus_remove(viodev);
        }

        return error;
}
/* convert from struct device to struct vio_dev and pass to driver. */
static int vio_bus_remove(struct device *dev)
{
        struct vio_dev *viodev = to_vio_dev(dev);
        struct vio_driver *viodrv = to_vio_driver(dev->driver);
        struct device *devptr;
        int ret = 1;

        /*
         * Hold a reference to the device after the remove function is called
         * to allow for CMO accounting cleanup for the device.
         */
        devptr = get_device(dev);

        if (viodrv->remove)
                ret = viodrv->remove(viodev);

        if (!ret && firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_remove(viodev);

        put_device(devptr);
        return ret;
}
/**
 * vio_register_driver: - Register a new vio driver
 * @viodrv:     The vio_driver structure to be registered.
 */
int vio_register_driver(struct vio_driver *viodrv)
{
        printk(KERN_DEBUG "%s: driver %s registering\n", __func__,
               viodrv->driver.name);

        /* fill in 'struct driver' fields */
        viodrv->driver.bus = &vio_bus_type;

        return driver_register(&viodrv->driver);
}
EXPORT_SYMBOL(vio_register_driver);
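
/*
 * Registration sketch (hypothetical driver; all "foo" names are made up):
 * a client driver supplies an id table matched against the device_type
 * and compatible properties, then registers through the helper above:
 *
 *      static struct vio_device_id foo_device_table[] = {
 *              { "network", "IBM,l-lan" },
 *              { "", "" }
 *      };
 *
 *      static struct vio_driver foo_driver = {
 *              .id_table        = foo_device_table,
 *              .probe           = foo_probe,
 *              .remove          = foo_remove,
 *              .get_desired_dma = foo_get_desired_dma, -- required under CMO
 *              .driver          = { .name = "foo" },
 *      };
 *
 * with vio_register_driver(&foo_driver) called from the module init path.
 */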
/**
 * vio_unregister_driver - Remove registration of vio driver.
 * @viodrv:     The vio_driver struct to be removed from registration
 */
void vio_unregister_driver(struct vio_driver *viodrv)
{
        driver_unregister(&viodrv->driver);
}
EXPORT_SYMBOL(vio_unregister_driver);
/* vio_dev refcount hit 0 */
static void __devinit vio_dev_release(struct device *dev)
{
        /* XXX should free TCE table */
        of_node_put(dev->archdata.of_node);
        kfree(to_vio_dev(dev));
}
/**
 * vio_register_device_node: - Register a new vio device.
 * @of_node:    The OF node for this device.
 *
 * Creates and initializes a vio_dev structure from the data in
 * of_node and adds it to the list of virtual devices.
 * Returns a pointer to the created vio_dev or NULL if node has
 * NULL device_type or compatible fields.
 */
struct vio_dev *vio_register_device_node(struct device_node *of_node)
{
        struct vio_dev *viodev;
        const unsigned int *unit_address;

        /* we need the 'device_type' property, in order to match with drivers */
        if (of_node->type == NULL) {
                printk(KERN_WARNING "%s: node %s missing 'device_type'\n",
                       __func__,
                       of_node->name ? of_node->name : "<unknown>");
                return NULL;
        }

        unit_address = of_get_property(of_node, "reg", NULL);
        if (unit_address == NULL) {
                printk(KERN_WARNING "%s: node %s missing 'reg'\n",
                       __func__,
                       of_node->name ? of_node->name : "<unknown>");
                return NULL;
        }

        /* allocate a vio_dev for this node */
        viodev = kzalloc(sizeof(struct vio_dev), GFP_KERNEL);
        if (viodev == NULL)
                return NULL;

        viodev->irq = irq_of_parse_and_map(of_node, 0);

        dev_set_name(&viodev->dev, "%x", *unit_address);
        viodev->name = of_node->name;
        viodev->type = of_node->type;
        viodev->unit_address = *unit_address;
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                unit_address = of_get_property(of_node,
                                               "linux,unit_address", NULL);
                if (unit_address != NULL)
                        viodev->unit_address = *unit_address;
        }
        viodev->dev.archdata.of_node = of_node_get(of_node);

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_set_dma_ops(viodev);
        else
                viodev->dev.archdata.dma_ops = &dma_iommu_ops;
        set_iommu_table_base(&viodev->dev, vio_build_iommu_table(viodev));
        set_dev_node(&viodev->dev, of_node_to_nid(of_node));

        /* init generic 'struct device' fields: */
        viodev->dev.parent = &vio_bus_device.dev;
        viodev->dev.bus = &vio_bus_type;
        viodev->dev.release = vio_dev_release;

        /* register with generic device framework */
        if (device_register(&viodev->dev)) {
                printk(KERN_ERR "%s: failed to register device %s\n",
                       __func__, dev_name(&viodev->dev));
                /* XXX free TCE table */
                kfree(viodev);
                return NULL;
        }

        return viodev;
}
EXPORT_SYMBOL(vio_register_device_node);
/**
 * vio_bus_init: - Initialize the virtual IO bus
 */
static int __init vio_bus_init(void)
{
        int err;
        struct device_node *node_vroot;

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_sysfs_init();

        err = bus_register(&vio_bus_type);
        if (err) {
                printk(KERN_ERR "failed to register VIO bus\n");
                return err;
        }

        /*
         * The fake parent of all vio devices, just to give us
         * a nice directory
         */
        err = device_register(&vio_bus_device.dev);
        if (err) {
                printk(KERN_WARNING "%s: device_register returned %i\n",
                       __func__, err);
                return err;
        }

        if (firmware_has_feature(FW_FEATURE_CMO))
                vio_cmo_bus_init();

        node_vroot = of_find_node_by_name(NULL, "vdevice");
        if (node_vroot) {
                struct device_node *of_node;

                /*
                 * Create struct vio_devices for each virtual device in
                 * the device tree. Drivers will associate with them later.
                 */
                for (of_node = node_vroot->child; of_node != NULL;
                     of_node = of_node->sibling)
                        vio_register_device_node(of_node);
                of_node_put(node_vroot);
        }

        return 0;
}
__initcall(vio_bus_init);
static ssize_t name_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", to_vio_dev(dev)->name);
}

static ssize_t devspec_show(struct device *dev,
                            struct device_attribute *attr, char *buf)
{
        struct device_node *of_node = dev->archdata.of_node;

        return sprintf(buf, "%s\n", of_node ? of_node->full_name : "none");
}

static struct device_attribute vio_dev_attrs[] = {
        __ATTR_RO(name),
        __ATTR_RO(devspec),
        __ATTR_NULL
};
void __devinit vio_unregister_device(struct vio_dev *viodev)
{
        device_unregister(&viodev->dev);
}
EXPORT_SYMBOL(vio_unregister_device);
static int vio_bus_match(struct device *dev, struct device_driver *drv)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct vio_driver *vio_drv = to_vio_driver(drv);
        const struct vio_device_id *ids = vio_drv->id_table;

        return (ids != NULL) && (vio_match_device(ids, vio_dev) != NULL);
}
static int vio_hotplug(struct device *dev, struct kobj_uevent_env *env)
{
        const struct vio_dev *vio_dev = to_vio_dev(dev);
        struct device_node *dn;
        const char *cp;

        dn = dev->archdata.of_node;
        if (!dn)
                return -ENODEV;
        cp = of_get_property(dn, "compatible", NULL);
        if (!cp)
                return -ENODEV;

        add_uevent_var(env, "MODALIAS=vio:T%sS%s", vio_dev->type, cp);
        return 0;
}
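
/*
 * Example (illustrative): for a virtual ethernet node with device_type
 * "network" and compatible "IBM,l-lan", the uevent above would carry
 * MODALIAS=vio:TnetworkSIBM,l-lan, which module aliases generated from a
 * driver's id table can match against at hotplug time.
 */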
static struct bus_type vio_bus_type = {
        .name = "vio",
        .dev_attrs = vio_dev_attrs,
        .uevent = vio_hotplug,
        .match = vio_bus_match,
        .probe = vio_bus_probe,
        .remove = vio_bus_remove,
};
/**
 * vio_get_attribute: - get attribute for virtual device
 * @vdev:       The vio device to get property.
 * @which:      The property/attribute to be extracted.
 * @length:     Pointer to length of returned data size (unused if NULL).
 *
 * Calls prom.c's of_get_property() to return the value of the
 * attribute specified by @which
 */
const void *vio_get_attribute(struct vio_dev *vdev, char *which, int *length)
{
        return of_get_property(vdev->dev.archdata.of_node, which, length);
}
EXPORT_SYMBOL(vio_get_attribute);
#ifdef CONFIG_PPC_PSERIES
/* vio_find_name() - internal because only vio.c knows how we formatted the
 * kobject name
 */
static struct vio_dev *vio_find_name(const char *name)
{
        struct device *found;

        found = bus_find_device_by_name(&vio_bus_type, NULL, name);
        if (!found)
                return NULL;

        return to_vio_dev(found);
}

/**
 * vio_find_node - find an already-registered vio_dev
 * @vnode: device_node of the virtual device we're looking for
 */
struct vio_dev *vio_find_node(struct device_node *vnode)
{
        const uint32_t *unit_address;
        char kobj_name[20];

        /* construct the kobject name from the device node */
        unit_address = of_get_property(vnode, "reg", NULL);
        if (!unit_address)
                return NULL;
        snprintf(kobj_name, sizeof(kobj_name), "%x", *unit_address);

        return vio_find_name(kobj_name);
}
EXPORT_SYMBOL(vio_find_node);
int vio_enable_interrupts(struct vio_dev *dev)
{
        int rc = h_vio_signal(dev->unit_address, VIO_IRQ_ENABLE);
        if (rc != H_SUCCESS)
                printk(KERN_ERR "vio: Error 0x%x enabling interrupts\n", rc);
        return rc;
}
EXPORT_SYMBOL(vio_enable_interrupts);

int vio_disable_interrupts(struct vio_dev *dev)
{
        int rc = h_vio_signal(dev->unit_address, VIO_IRQ_DISABLE);
        if (rc != H_SUCCESS)
                printk(KERN_ERR "vio: Error 0x%x disabling interrupts\n", rc);
        return rc;
}
EXPORT_SYMBOL(vio_disable_interrupts);
#endif /* CONFIG_PPC_PSERIES */