/*
 * Copyright (C) 2009 Novell.  All Rights Reserved.
 *
 * Author:
 *      Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/ioq.h>
#include <linux/interrupt.h>
#include <linux/vbus_driver.h>
#include <linux/vbus_pci.h>
MODULE_AUTHOR("Gregory Haskins");
MODULE_LICENSE("GPL");

#define VBUS_PCI_NAME "pci-to-vbus-bridge"
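/*
 * All bridge state lives in the single vbus_pci instance below; probe
 * refuses a second bridge with -EEXIST, so "one bridge per kernel" is an
 * invariant of this design.
 */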
struct vbus_pci {
        spinlock_t lock;
        struct pci_dev *dev;
        struct ioq eventq;
        struct vbus_pci_event *ring;
        struct vbus_pci_regs *regs;
        struct vbus_pci_signals *signals;
        int irq;
        bool enabled;
        struct {
                struct dentry *fs;
                int events;
                int qnotify;
                int qinject;
                int notify;
                int inject;
                int bridgecalls;
                int buscalls;
        } stats;
};

static struct vbus_pci vbus_pci;
struct vbus_pci_device {
        char type[VBUS_MAX_DEVTYPE_LEN];
        u64 handle;
        struct list_head shms;
        struct vbus_device_proxy vdev;
        struct work_struct add;
        struct work_struct drop;
};
static DEFINE_PER_CPU(struct vbus_pci_fastcall_desc, vbus_pci_percpu_fastcall)
        ____cacheline_aligned;
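/*
 * One fastcall descriptor is registered with the host per possible CPU
 * (see VBUS_PCI_BRIDGE_FASTCALL_ADD in vbus_pci_probe()).  That is what
 * lets vbus_pci_buscall() issue hypercalls without taking vbus_pci.lock:
 * each CPU owns its own descriptor, so disabling preemption is enough.
 */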
static int
vbus_pci_bridgecall(unsigned long nr, void *data, unsigned long len)
{
        struct vbus_pci_call_desc params = {
                .vector = nr,
                .len    = len,
                .datap  = __pa(data),
        };
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&vbus_pci.lock, flags);

        memcpy_toio(&vbus_pci.regs->bridgecall, &params, sizeof(params));
        ret = ioread32(&vbus_pci.regs->bridgecall);

        spin_unlock_irqrestore(&vbus_pci.lock, flags);

        vbus_pci.stats.bridgecalls++;

        return ret;
}
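/*
 * In contrast to the locked MMIO round-trip above, the "buscall" path
 * below rides the per-CPU fastcall descriptor: fill in this CPU's
 * descriptor, kick the host with a single register write, and read the
 * result back out of the descriptor.
 */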
static int
vbus_pci_buscall(unsigned long nr, void *data, unsigned long len)
{
        struct vbus_pci_fastcall_desc *params;
        int ret;

        params = &get_cpu_var(vbus_pci_percpu_fastcall);

        params->call.vector = nr;
        params->call.len    = len;
        params->call.datap  = __pa(data);

        iowrite32(smp_processor_id(), &vbus_pci.signals->fastcall);

        ret = params->result;

        put_cpu_var(vbus_pci_percpu_fastcall);

        vbus_pci.stats.buscalls++;

        return ret;
}
static struct vbus_pci_device *
to_dev(struct vbus_device_proxy *vdev)
{
        return container_of(vdev, struct vbus_pci_device, vdev);
}
static void
_signal_init(struct shm_signal *signal, struct shm_signal_desc *desc,
             struct shm_signal_ops *ops)
{
        desc->magic = SHM_SIGNAL_MAGIC;
        desc->ver   = SHM_SIGNAL_VER;

        shm_signal_init(signal, shm_locality_north, ops, desc);
}
/*
 * -------------------
 * shm-signal routines
 * -------------------
 */
struct _signal {
        struct vbus_pci *pcivbus;
        struct shm_signal signal;
        u32 handle;
        struct {
                int inject;
                int notify;
        } stats;
        struct list_head list;
};
static struct _signal *
to_signal(struct shm_signal *signal)
{
        return container_of(signal, struct _signal, signal);
}
static int
_signal_inject(struct shm_signal *signal)
{
        struct _signal *_signal = to_signal(signal);

        vbus_pci.stats.inject++;
        _signal->stats.inject++;
        iowrite32(_signal->handle, &vbus_pci.signals->shmsignal);

        return 0;
}
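/*
 * Note that _signal->handle is the value the host returned from the
 * DEVSHM hypercall (see vbus_pci_device_shm()), so the write above acts
 * as a doorbell identifying exactly which shared-memory region changed.
 */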
static void
_signal_release(struct shm_signal *signal)
{
        struct _signal *_signal = to_signal(signal);

        kfree(_signal);
}
static struct shm_signal_ops _signal_ops = {
        .inject  = _signal_inject,
        .release = _signal_release,
};
/*
 * -------------------
 * vbus_device_proxy routines
 * -------------------
 */
static int
vbus_pci_device_open(struct vbus_device_proxy *vdev, int version, int flags)
{
        struct vbus_pci_device *dev = to_dev(vdev);
        struct vbus_pci_deviceopen params;
        int ret;

        if (dev->handle)
                return -EINVAL;

        params.devid   = vdev->id;
        params.version = version;

        ret = vbus_pci_buscall(VBUS_PCI_HC_DEVOPEN,
                               &params, sizeof(params));
        if (ret < 0)
                return ret;

        dev->handle = params.handle;

        return 0;
}
static void
vbus_pci_device_close(struct vbus_device_proxy *vdev, int flags)
{
        struct vbus_pci_device *dev = to_dev(vdev);
        unsigned long iflags;
        int ret;

        if (!dev->handle)
                return;

        spin_lock_irqsave(&vbus_pci.lock, iflags);

        while (!list_empty(&dev->shms)) {
                struct _signal *_signal;

                _signal = list_first_entry(&dev->shms, struct _signal, list);

                list_del(&_signal->list);

                spin_unlock_irqrestore(&vbus_pci.lock, iflags);
                shm_signal_put(&_signal->signal);
                spin_lock_irqsave(&vbus_pci.lock, iflags);
        }

        spin_unlock_irqrestore(&vbus_pci.lock, iflags);

        /*
         * The DEVICECLOSE will implicitly close all of the shm on the
         * host-side, so there is no need to do an explicit per-shm
         * hypercall
         */
        ret = vbus_pci_buscall(VBUS_PCI_HC_DEVCLOSE,
                               &dev->handle, sizeof(dev->handle));
        if (ret < 0)
                printk(KERN_ERR "VBUS-PCI: Error closing device %s/%lld: %d\n",
                       vdev->type, vdev->id, ret);

        dev->handle = 0;
}
static int
vbus_pci_device_shm(struct vbus_device_proxy *vdev, int id, int prio,
                    void *ptr, size_t len,
                    struct shm_signal_desc *sdesc, struct shm_signal **signal,
                    int flags)
{
        struct vbus_pci_device *dev = to_dev(vdev);
        struct _signal *_signal = NULL;
        struct vbus_pci_deviceshm params;
        unsigned long iflags;
        int ret;

        if (!dev->handle)
                return -EINVAL;

        params.devh  = dev->handle;
        params.id    = id;
        params.flags = flags;
        params.datap = (u64)__pa(ptr);
        params.len   = len;

        if (signal) {
                /*
                 * The signal descriptor must be embedded within the
                 * provided memory
                 */
                if (!sdesc
                    || (len < sizeof(*sdesc))
                    || ((void *)sdesc < ptr)
                    || ((void *)sdesc > (ptr + len - sizeof(*sdesc))))
                        return -EINVAL;

                _signal = kzalloc(sizeof(*_signal), GFP_KERNEL);
                if (!_signal)
                        return -ENOMEM;

                _signal_init(&_signal->signal, sdesc, &_signal_ops);

                /*
                 * take another reference for the host.  This is dropped
                 * by a SHMCLOSE event
                 */
                shm_signal_get(&_signal->signal);

                params.signal.offset = (u64)sdesc - (u64)ptr;
                params.signal.prio   = prio;
                params.signal.cookie = (u64)_signal;
        } else
                params.signal.offset = -1; /* yes, this is a u32, but it's ok */

        ret = vbus_pci_buscall(VBUS_PCI_HC_DEVSHM,
                               &params, sizeof(params));
        if (ret < 0) {
                if (_signal) {
                        /*
                         * We held two references above, so we need to drop
                         * both of them
                         */
                        shm_signal_put(&_signal->signal);
                        shm_signal_put(&_signal->signal);
                }

                return ret;
        }

        if (signal) {
                _signal->handle = ret;

                spin_lock_irqsave(&vbus_pci.lock, iflags);

                list_add_tail(&_signal->list, &dev->shms);

                spin_unlock_irqrestore(&vbus_pci.lock, iflags);

                shm_signal_get(&_signal->signal);
                *signal = &_signal->signal;
        }

        return 0;
}
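/*
 * Reference accounting for each _signal, as implemented above: the
 * initial reference from _signal_init() is dropped when device_close()
 * tears the entry off of dev->shms; the explicit shm_signal_get() before
 * the DEVSHM call is the host's reference, dropped on a SHMCLOSE event;
 * and the final shm_signal_get() backs the pointer handed to the caller
 * through *signal.
 */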
static int
vbus_pci_device_call(struct vbus_device_proxy *vdev, u32 func, void *data,
                     size_t len, int flags)
{
        struct vbus_pci_device *dev = to_dev(vdev);
        struct vbus_pci_devicecall params = {
                .devh  = dev->handle,
                .func  = func,
                .datap = (u64)__pa(data),
                .len   = len,
                .flags = flags,
        };

        if (!dev->handle)
                return -EINVAL;

        return vbus_pci_buscall(VBUS_PCI_HC_DEVCALL, &params, sizeof(params));
}
static void
vbus_pci_device_release(struct vbus_device_proxy *vdev)
{
        struct vbus_pci_device *_dev = to_dev(vdev);

        vbus_pci_device_close(vdev, 0);

        kfree(_dev);
}
struct vbus_device_proxy_ops vbus_pci_device_ops = {
        .open    = vbus_pci_device_open,
        .close   = vbus_pci_device_close,
        .shm     = vbus_pci_device_shm,
        .call    = vbus_pci_device_call,
        .release = vbus_pci_device_release,
};
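/*
 * Illustrative sketch (not part of this driver; the MYDRV_* names and
 * ring layout are hypothetical): a proxy driver bound on top of this bus
 * would consume the ops above roughly like so:
 *
 *      struct shm_signal *signal;
 *      int ret;
 *
 *      ret = vdev->ops->open(vdev, MYDRV_ABI_VERSION, 0);
 *      if (!ret)
 *              ret = vdev->ops->shm(vdev, MYDRV_RING_ID, 0,
 *                                   ring, ring_len,
 *                                   &ring->signal_desc, &signal, 0);
 *
 * after which the host can kick the driver asynchronously through the
 * returned shm_signal.
 */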
/*
 * -------------------
 * vbus events
 * -------------------
 */
static void
deferred_devadd(struct work_struct *work)
{
        struct vbus_pci_device *new;
        int ret;

        new = container_of(work, struct vbus_pci_device, add);

        ret = vbus_device_proxy_register(&new->vdev);
        if (ret < 0)
                panic("failed to register device %lld(%s): %d\n",
                      new->vdev.id, new->type, ret);
}
static void
deferred_devdrop(struct work_struct *work)
{
        struct vbus_pci_device *dev;

        dev = container_of(work, struct vbus_pci_device, drop);
        vbus_device_proxy_unregister(&dev->vdev);
}
static void
event_devadd(struct vbus_pci_add_event *event)
{
        struct vbus_pci_device *new = kzalloc(sizeof(*new), GFP_KERNEL);
        if (!new) {
                printk(KERN_ERR "VBUS_PCI: Out of memory on add_event\n");
                return;
        }

        INIT_LIST_HEAD(&new->shms);

        memcpy(new->type, event->type, VBUS_MAX_DEVTYPE_LEN);
        new->vdev.type = new->type;
        new->vdev.id   = event->id;
        new->vdev.ops  = &vbus_pci_device_ops;

        dev_set_name(&new->vdev.dev, "%lld", event->id);

        INIT_WORK(&new->add, deferred_devadd);
        INIT_WORK(&new->drop, deferred_devdrop);

        schedule_work(&new->add);
}
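/*
 * Device add/drop are deferred to work items because these events arrive
 * over the eventq (ultimately from interrupt context via eventq_intr()),
 * while proxy registration/unregistration needs a context that can sleep.
 */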
static void
event_devdrop(struct vbus_pci_handle_event *event)
{
        struct vbus_device_proxy *dev = vbus_device_proxy_find(event->handle);
        if (!dev) {
                printk(KERN_WARNING "VBUS-PCI: devdrop failed: %lld\n",
                       event->handle);
                return;
        }

        schedule_work(&to_dev(dev)->drop);
}
static void
event_shmsignal(struct vbus_pci_handle_event *event)
{
        struct _signal *_signal = (struct _signal *)event->handle;

        vbus_pci.stats.notify++;
        _signal->stats.notify++;
        _shm_signal_wakeup(&_signal->signal);
}
static void
event_shmclose(struct vbus_pci_handle_event *event)
{
        struct _signal *_signal = (struct _signal *)event->handle;

        /*
         * This reference was taken during the DEVICESHM call
         */
        shm_signal_put(&_signal->signal);
}
/*
 * -------------------
 * eventq routines
 * -------------------
 */
static struct ioq_notifier eventq_notifier;

#define QLEN 1024 /* assumed eventq ring depth */
static int
eventq_init(int qlen)
{
        struct ioq_iterator iter;
        int ret;
        int i;

        vbus_pci.ring = kzalloc(sizeof(struct vbus_pci_event) * qlen,
                                GFP_KERNEL);
        if (!vbus_pci.ring)
                return -ENOMEM;

        /*
         * We want to iterate on the "valid" index.  By default the iterator
         * will not "autoupdate" which means it will not hypercall the host
         * with our changes.  This is good, because we are really just
         * initializing stuff here anyway.  Note that you can always manually
         * signal the host with ioq_signal() if the autoupdate feature is not
         * used.
         */
        ret = ioq_iter_init(&vbus_pci.eventq, &iter, ioq_idxtype_valid, 0);
        BUG_ON(ret < 0);

        /*
         * Seek to the tail of the valid index (which should be our first
         * item since the queue is brand-new)
         */
        ret = ioq_iter_seek(&iter, ioq_seek_tail, 0, 0);
        BUG_ON(ret < 0);

        /*
         * Now populate each descriptor with an empty vbus_event and mark it
         * valid
         */
        for (i = 0; i < qlen; i++) {
                struct vbus_pci_event *event = &vbus_pci.ring[i];
                size_t len = sizeof(*event);
                struct ioq_ring_desc *desc = iter.desc;

                BUG_ON(iter.desc->valid);

                desc->cookie = (u64)event;
                desc->ptr    = (u64)__pa(event);
                desc->len    = len; /* total length */
                desc->valid  = 1;

                /*
                 * This push operation will simultaneously advance the
                 * valid-tail index and increment our position in the queue
                 * by one.
                 */
                ret = ioq_iter_push(&iter, 0);
                BUG_ON(ret < 0);
        }

        vbus_pci.eventq.notifier = &eventq_notifier;

        /*
         * And finally, ensure that we can receive notification
         */
        ioq_notify_enable(&vbus_pci.eventq, 0);

        return 0;
}
/* Invoked whenever the hypervisor ioq_signal()s our eventq */
static void
eventq_wakeup(struct ioq_notifier *notifier)
{
        struct ioq_iterator iter;
        int ret;

        /* We want to iterate on the head of the in-use index */
        ret = ioq_iter_init(&vbus_pci.eventq, &iter, ioq_idxtype_inuse, 0);
        BUG_ON(ret < 0);

        ret = ioq_iter_seek(&iter, ioq_seek_head, 0, 0);
        BUG_ON(ret < 0);

        /*
         * The EOM is indicated by finding a packet that is still owned by
         * the south side.
         *
         * FIXME: This in theory could run indefinitely if the host keeps
         * feeding us events since there is nothing like a NAPI budget.  We
         * might need to address that
         */
        while (!iter.desc->sown) {
                struct ioq_ring_desc *desc = iter.desc;
                struct vbus_pci_event *event;

                event = (struct vbus_pci_event *)desc->cookie;

                switch (event->eventid) {
                case VBUS_PCI_EVENT_DEVADD:
                        event_devadd(&event->data.add);
                        break;
                case VBUS_PCI_EVENT_DEVDROP:
                        event_devdrop(&event->data.handle);
                        break;
                case VBUS_PCI_EVENT_SHMSIGNAL:
                        event_shmsignal(&event->data.handle);
                        break;
                case VBUS_PCI_EVENT_SHMCLOSE:
                        event_shmclose(&event->data.handle);
                        break;
                default:
                        printk(KERN_WARNING "VBUS_PCI: Unexpected event %d\n",
                               event->eventid);
                        break;
                }

                memset(event, 0, sizeof(*event));

                /* Advance the in-use head */
                ret = ioq_iter_pop(&iter, 0);
                BUG_ON(ret < 0);

                vbus_pci.stats.events++;
        }

        /* And let the south side know that we changed the queue */
        ioq_signal(&vbus_pci.eventq, 0);
}
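/*
 * The full notification path is therefore: host ioq_signal() -> MSI ->
 * eventq_intr() -> _shm_signal_wakeup() -> eventq_wakeup(), which drains
 * the ring and then re-signals the host.
 */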
static struct ioq_notifier eventq_notifier = {
        .signal = &eventq_wakeup,
};
/* Invoked whenever the host issues an ioq_signal() on the eventq */
static irqreturn_t
eventq_intr(int irq, void *dev)
{
        vbus_pci.stats.qnotify++;
        _shm_signal_wakeup(vbus_pci.eventq.signal);

        return IRQ_HANDLED;
}
/*
 * -------------------
 * eventq signal routines
 * -------------------
 */
static int
eventq_signal_inject(struct shm_signal *signal)
{
        vbus_pci.stats.qinject++;

        /* The eventq uses the special-case handle=0 */
        iowrite32(0, &vbus_pci.signals->eventq);

        return 0;
}
static void
eventq_signal_release(struct shm_signal *signal)
{
        kfree(signal);
}

static struct shm_signal_ops eventq_signal_ops = {
        .inject  = eventq_signal_inject,
        .release = eventq_signal_release,
};
/*
 * -------------------
 * eventq ioq routines
 * -------------------
 */
static void
eventq_ioq_release(struct ioq *ioq)
{
        /* released as part of the vbus_pci object */
}

static struct ioq_ops eventq_ioq_ops = {
        .release = eventq_ioq_release,
};
/*
 * -------------------
 * bridge routines
 * -------------------
 */
static void
vbus_pci_release(void)
{
#ifdef CONFIG_DEBUG_FS
        if (vbus_pci.stats.fs)
                debugfs_remove(vbus_pci.stats.fs);
#endif

        if (vbus_pci.irq > 0)
                free_irq(vbus_pci.irq, NULL);

        if (vbus_pci.signals)
                pci_iounmap(vbus_pci.dev, (void *)vbus_pci.signals);

        if (vbus_pci.regs)
                pci_iounmap(vbus_pci.dev, (void *)vbus_pci.regs);

        pci_release_regions(vbus_pci.dev);
        pci_disable_device(vbus_pci.dev);

        kfree(vbus_pci.eventq.head_desc);
        kfree(vbus_pci.ring);

        vbus_pci.enabled = false;
}
static int
vbus_pci_open(void)
{
        struct vbus_pci_bridge_negotiate params = {
                .magic   = VBUS_PCI_ABI_MAGIC,
                .version = VBUS_PCI_HC_VERSION,
        };

        return vbus_pci_bridgecall(VBUS_PCI_BRIDGE_NEGOTIATE,
                                   &params, sizeof(params));
}
static int
vbus_pci_eventq_register(void)
{
        struct vbus_pci_busreg params = {
                .count = 1, /* one queue: the eventq */
                .eventq = {
                        {
                                .count = QLEN,
                                .ring  = (u64)__pa(vbus_pci.eventq.head_desc),
                                .data  = (u64)__pa(vbus_pci.ring),
                        },
                },
        };

        return vbus_pci_bridgecall(VBUS_PCI_BRIDGE_QREG,
                                   &params, sizeof(params));
}
static int
_ioq_init(size_t ringsize, struct ioq *ioq, struct ioq_ops *ops)
{
        struct shm_signal    *signal = NULL;
        struct ioq_ring_head *head   = NULL;
        size_t len = IOQ_HEAD_DESC_SIZE(ringsize);

        head = kzalloc(len, GFP_KERNEL | GFP_DMA);
        if (!head)
                return -ENOMEM;

        signal = kzalloc(sizeof(*signal), GFP_KERNEL);
        if (!signal) {
                kfree(head);
                return -ENOMEM;
        }

        head->magic = IOQ_RING_MAGIC;
        head->ver   = IOQ_RING_VER;
        head->count = ringsize;

        _signal_init(signal, &head->signal, &eventq_signal_ops);

        ioq_init(ioq, ops, ioq_locality_north, head, signal, ringsize);

        return 0;
}
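/*
 * A note on naming: the shm/ioq layers label the guest side "north" and
 * the host side "south", which is why everything in this file initializes
 * with *_locality_north while the comments refer to the host as the
 * "south side".
 */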
#ifdef CONFIG_DEBUG_FS
static int _debugfs_seq_show(struct seq_file *m, void *p)
{
#define P(F) \
        seq_printf(m, "  .%-30s: %d\n", #F, (int)vbus_pci.stats.F)

        P(events);
        P(qnotify);
        P(qinject);
        P(notify);
        P(inject);
        P(bridgecalls);
        P(buscalls);

#undef P

        return 0;
}

static int _debugfs_fops_open(struct inode *inode, struct file *file)
{
        return single_open(file, _debugfs_seq_show, inode->i_private);
}

static const struct file_operations stat_fops = {
        .open    = _debugfs_fops_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
        .owner   = THIS_MODULE,
};
#endif
static int __devinit
vbus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        int ret;
        int cpu;

        if (vbus_pci.enabled)
                return -EEXIST; /* we only support one bridge per kernel */

        if (pdev->revision != VBUS_PCI_ABI_VERSION) {
                printk(KERN_DEBUG "VBUS_PCI: expected ABI version %d, got %d\n",
                       VBUS_PCI_ABI_VERSION,
                       pdev->revision);
                return -ENODEV;
        }

        vbus_pci.dev = pdev;

        ret = pci_enable_device(pdev);
        if (ret < 0)
                return ret;

        ret = pci_request_regions(pdev, VBUS_PCI_NAME);
        if (ret < 0) {
                printk(KERN_ERR "VBUS_PCI: Could not init BARs: %d\n", ret);
                goto out_fail;
        }

        vbus_pci.regs = pci_iomap(pdev, 0, sizeof(struct vbus_pci_regs));
        if (!vbus_pci.regs) {
                printk(KERN_ERR "VBUS_PCI: Could not map BARs\n");
                ret = -ENOMEM;
                goto out_fail;
        }

        vbus_pci.signals = pci_iomap(pdev, 1, sizeof(struct vbus_pci_signals));
        if (!vbus_pci.signals) {
                printk(KERN_ERR "VBUS_PCI: Could not map BARs\n");
                ret = -ENOMEM;
                goto out_fail;
        }

        ret = vbus_pci_open();
        if (ret < 0) {
                printk(KERN_DEBUG "VBUS_PCI: Could not register with host: %d\n",
                       ret);
                goto out_fail;
        }

        /*
         * Allocate an IOQ to use for host-2-guest event notification
         */
        ret = _ioq_init(QLEN, &vbus_pci.eventq, &eventq_ioq_ops);
        if (ret < 0) {
                printk(KERN_ERR "VBUS_PCI: Could not init eventq: %d\n", ret);
                goto out_fail;
        }

        ret = eventq_init(QLEN);
        if (ret < 0) {
                printk(KERN_ERR "VBUS_PCI: Could not setup ring: %d\n", ret);
                goto out_fail;
        }

        ret = pci_enable_msi(pdev);
        if (ret < 0) {
                printk(KERN_ERR "VBUS_PCI: Could not enable MSI: %d\n", ret);
                goto out_fail;
        }

        vbus_pci.irq = pdev->irq;

        ret = request_irq(pdev->irq, eventq_intr, 0, "vbus", NULL);
        if (ret < 0) {
                printk(KERN_ERR "VBUS_PCI: Failed to register IRQ %d: %d\n",
                       pdev->irq, ret);
                goto out_fail;
        }

        /*
         * Add one fastcall vector per cpu so that we can do lockless
         * hypercalls
         */
        for_each_possible_cpu(cpu) {
                struct vbus_pci_fastcall_desc *desc =
                        &per_cpu(vbus_pci_percpu_fastcall, cpu);
                struct vbus_pci_call_desc params = {
                        .vector = cpu,
                        .len    = sizeof(*desc),
                        .datap  = __pa(desc),
                };

                ret = vbus_pci_bridgecall(VBUS_PCI_BRIDGE_FASTCALL_ADD,
                                          &params, sizeof(params));
                if (ret < 0) {
                        printk(KERN_ERR
                               "VBUS_PCI: Failed to register cpu:%d: %d\n",
                               cpu, ret);
                        goto out_fail;
                }
        }

        /*
         * Finally register our queue on the host to start receiving events
         */
        ret = vbus_pci_eventq_register();
        if (ret < 0) {
                printk(KERN_ERR "VBUS_PCI: Could not register with host: %d\n",
                       ret);
                goto out_fail;
        }

#ifdef CONFIG_DEBUG_FS
        vbus_pci.stats.fs = debugfs_create_file(VBUS_PCI_NAME, S_IRUGO,
                                                NULL, NULL, &stat_fops);
        if (IS_ERR(vbus_pci.stats.fs)) {
                ret = PTR_ERR(vbus_pci.stats.fs);
                printk(KERN_ERR "VBUS_PCI: error creating stats-fs: %d\n", ret);
                goto out_fail;
        }
#endif

        vbus_pci.enabled = true;

        printk(KERN_INFO "Virtual-Bus: Copyright (c) 2009, "
               "Gregory Haskins <ghaskins@novell.com>\n");

        return 0;

out_fail:
        vbus_pci_release();

        return ret;
}
static void __devexit
vbus_pci_remove(struct pci_dev *pdev)
{
        vbus_pci_release();
}
static DEFINE_PCI_DEVICE_TABLE(vbus_pci_tbl) = {
        { PCI_DEVICE(0x11da, 0x2000) },
        { 0 },
};

MODULE_DEVICE_TABLE(pci, vbus_pci_tbl);
static struct pci_driver vbus_pci_driver = {
        .name     = VBUS_PCI_NAME,
        .id_table = vbus_pci_tbl,
        .probe    = vbus_pci_probe,
        .remove   = vbus_pci_remove,
};
static int __init
vbus_pci_init(void)
{
        memset(&vbus_pci, 0, sizeof(vbus_pci));
        spin_lock_init(&vbus_pci.lock);

        return pci_register_driver(&vbus_pci_driver);
}

static void __exit
vbus_pci_exit(void)
{
        pci_unregister_driver(&vbus_pci_driver);
}

module_init(vbus_pci_init);
module_exit(vbus_pci_exit);