/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Intel MIC Host driver.
 *
 */
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/dmaengine.h>
#include <linux/mic_common.h>

#include "../common/mic_dev.h"
#include "mic_device.h"
#include "mic_smpt.h"
#include "mic_virtio.h"
/*
 * Size of the internal buffer used during DMA's as an intermediate buffer
 * for copy to/from user.
 */
#define MIC_INT_DMA_BUF_SIZE PAGE_ALIGN(64 * 1024ULL)
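/*
 * Synchronously perform a DMA copy of "len" bytes from "src" to "dst",
 * waiting for the transfer to complete before returning.
 */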
static int mic_sync_dma(struct mic_device *mdev, dma_addr_t dst,
			dma_addr_t src, size_t len)
{
	int err = 0;
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *mic_ch = mdev->dma_ch;

	if (!mic_ch) {
		err = -EBUSY;
		goto error;
	}

	tx = mic_ch->device->device_prep_dma_memcpy(mic_ch, dst, src, len,
						    DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		goto error;
	} else {
		dma_cookie_t cookie = tx->tx_submit(tx);

		err = dma_submit_error(cookie);
		if (err)
			goto error;
		err = dma_sync_wait(mic_ch, cookie);
	}
error:
	if (err)
		dev_err(mdev->sdev->parent, "%s %d err %d\n",
			__func__, __LINE__, err);
	return err;
}
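/*
 * Worked example of the alignment handling below (illustrative values,
 * assuming a DMA engine with 64 byte copy_align): for daddr = 0x1034,
 * dma_offset = 0x1034 - round_down(0x1034, 64) = 0x34. The DMA source is
 * rounded down to 0x1000, the extra 0x34 bytes land at the start of the
 * bounce buffer, and copy_to_user() skips those first 0x34 bytes.
 */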
/*
 * Initiates the copies across the PCIe bus from card memory to a user
 * space buffer. When transfers are done using DMA, source/destination
 * addresses and transfer length must follow the alignment requirements of
 * the MIC DMA engine.
 */
static int mic_virtio_copy_to_user(struct mic_vdev *mvdev, void __user *ubuf,
				   size_t len, u64 daddr, size_t dlen,
				   int vr_idx)
{
	struct mic_device *mdev = mvdev->mdev;
	void __iomem *dbuf = mdev->aper.va + daddr;
	struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
	size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
	size_t dma_offset;
	size_t partlen;
	int err;

	dma_offset = daddr - round_down(daddr, dma_alignment);
	daddr -= dma_offset;
	len += dma_offset;

	while (len) {
		partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);

		err = mic_sync_dma(mdev, mvr->buf_da, daddr,
				   ALIGN(partlen, dma_alignment));
		if (err)
			goto err;

		if (copy_to_user(ubuf, mvr->buf + dma_offset,
				 partlen - dma_offset)) {
			err = -EFAULT;
			goto err;
		}
		daddr += partlen;
		ubuf += partlen;
		dbuf += partlen;
		mvdev->in_bytes_dma += partlen;
		mvdev->in_bytes += partlen;
		len -= partlen;
		dma_offset = 0;
	}
	return 0;
err:
	dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
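/*
 * Note on the function below: when the destination address or the length
 * cannot satisfy the DMA alignment constraints, the copy falls back to a
 * CPU copy directly into the PCIe aperture (the "memcpy" label) instead
 * of using DMA.
 */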
/*
 * Initiates copies across the PCIe bus from a user space buffer to card
 * memory. When transfers are done using DMA, source/destination addresses
 * and transfer length must follow the alignment requirements of the MIC
 * DMA engine.
 */
static int mic_virtio_copy_from_user(struct mic_vdev *mvdev, void __user *ubuf,
				     size_t len, u64 daddr, size_t dlen,
				     int vr_idx)
{
	struct mic_device *mdev = mvdev->mdev;
	void __iomem *dbuf = mdev->aper.va + daddr;
	struct mic_vringh *mvr = &mvdev->mvr[vr_idx];
	size_t dma_alignment = 1 << mdev->dma_ch->device->copy_align;
	size_t partlen;
	int err;

	if (daddr & (dma_alignment - 1)) {
		mvdev->tx_dst_unaligned += len;
		goto memcpy;
	} else if (ALIGN(len, dma_alignment) > dlen) {
		mvdev->tx_len_unaligned += len;
		goto memcpy;
	}

	while (len) {
		partlen = min_t(size_t, len, MIC_INT_DMA_BUF_SIZE);

		if (copy_from_user(mvr->buf, ubuf, partlen)) {
			err = -EFAULT;
			goto err;
		}
		err = mic_sync_dma(mdev, daddr, mvr->buf_da,
				   ALIGN(partlen, dma_alignment));
		if (err)
			goto err;
		daddr += partlen;
		ubuf += partlen;
		dbuf += partlen;
		mvdev->out_bytes_dma += partlen;
		mvdev->out_bytes += partlen;
		len -= partlen;
	}
	return 0;
memcpy:
	/*
	 * We are copying to IO below and should ideally use something
	 * like copy_from_user_toio(..) if it existed.
	 */
	if (copy_from_user((void __force *)dbuf, ubuf, len)) {
		err = -EFAULT;
		goto err;
	}
	mvdev->out_bytes += len;
	return 0;
err:
	dev_err(mic_dev(mvdev), "%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
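/* Direction flag for mic_vringh_copy(): "read" means card memory to user. */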
#define MIC_VRINGH_READ true
/* The function to call to notify the card about added buffers */
static void mic_notify(struct vringh *vrh)
{
	struct mic_vringh *mvrh = container_of(vrh, struct mic_vringh, vrh);
	struct mic_vdev *mvdev = mvrh->mvdev;
	s8 db = mvdev->dc->h2c_vdev_db;

	if (db != -1)
		mvdev->mdev->ops->send_intr(mvdev->mdev, db);
}
/* Determine the total number of bytes consumed in a VRINGH KIOV */
static inline u32 mic_vringh_iov_consumed(struct vringh_kiov *iov)
{
	int i;
	u32 total = iov->consumed;

	for (i = 0; i < iov->i; i++)
		total += iov->iov[i].iov_len;
	return total;
}
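/*
 * Note for mic_vringh_copy() below: when an element is fully consumed, its
 * iov_len/iov_base are restored to describe the entire consumed span before
 * iov->i is advanced, which is what makes the accounting in
 * mic_vringh_iov_consumed() above work.
 */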
/*
 * Traverse the VRINGH KIOV and issue the APIs to trigger the copies.
 * This API is heavily based on the vringh_iov_xfer(..) implementation
 * in vringh.c. The reason we cannot reuse vringh_iov_pull_kern(..)
 * and vringh_iov_push_kern(..) directly is because there is no
 * way to override the VRINGH xfer(..) routines as of v3.10.
 */
static int mic_vringh_copy(struct mic_vdev *mvdev, struct vringh_kiov *iov,
			void __user *ubuf, size_t len, bool read, int vr_idx,
			size_t *out_len)
{
	int ret = 0;
	size_t partlen, tot_len = 0;

	while (len && iov->i < iov->used) {
		partlen = min(iov->iov[iov->i].iov_len, len);
		if (read)
			ret = mic_virtio_copy_to_user(mvdev, ubuf, partlen,
						(u64)iov->iov[iov->i].iov_base,
						iov->iov[iov->i].iov_len,
						vr_idx);
		else
			ret = mic_virtio_copy_from_user(mvdev, ubuf, partlen,
						(u64)iov->iov[iov->i].iov_base,
						iov->iov[iov->i].iov_len,
						vr_idx);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= partlen;
		ubuf += partlen;
		tot_len += partlen;
		iov->consumed += partlen;
		iov->iov[iov->i].iov_len -= partlen;
		iov->iov[iov->i].iov_base += partlen;
		if (!iov->iov[iov->i].iov_len) {
			/* Fix up old iov element then increment. */
			iov->iov[iov->i].iov_len = iov->consumed;
			iov->iov[iov->i].iov_base -= iov->consumed;

			iov->consumed = 0;
			iov->i++;
		}
	}
	*out_len = tot_len;
	return ret;
}
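/*
 * In _mic_virtio_copy() below, mvr->head stays at USHRT_MAX while no
 * descriptor chain is in flight; a valid head returned by
 * vringh_getdesc_kern() marks a chain that must later be retired via
 * vringh_complete_kern().
 */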
/*
 * Use the standard VRINGH infrastructure in the kernel to fetch new
 * descriptors, initiate the copies and update the used ring.
 */
static int _mic_virtio_copy(struct mic_vdev *mvdev,
	struct mic_copy_desc *copy)
{
	int ret = 0;
	u32 iovcnt = copy->iovcnt;
	struct iovec iov;
	struct iovec __user *u_iov = copy->iov;
	void __user *ubuf = NULL;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];
	struct vringh_kiov *riov = &mvr->riov;
	struct vringh_kiov *wiov = &mvr->wiov;
	struct vringh *vrh = &mvr->vrh;
	u16 *head = &mvr->head;
	struct mic_vring *vr = &mvr->vring;
	size_t len = 0, out_len;

	copy->out_len = 0;
	/* Fetch a new IOVEC if all previous elements have been processed */
	if (riov->i == riov->used && wiov->i == wiov->used) {
		ret = vringh_getdesc_kern(vrh, riov, wiov,
					  head, GFP_KERNEL);
		/* Check if there are available descriptors */
		if (ret <= 0)
			return ret;
	}
	while (iovcnt) {
		if (!len) {
			/* Copy over a new iovec from user space. */
			ret = copy_from_user(&iov, u_iov, sizeof(*u_iov));
			if (ret) {
				ret = -EINVAL;
				dev_err(mic_dev(mvdev), "%s %d err %d\n",
					__func__, __LINE__, ret);
				break;
			}
			len = iov.iov_len;
			ubuf = iov.iov_base;
		}
		/* Issue all the read descriptors first */
		ret = mic_vringh_copy(mvdev, riov, ubuf, len, MIC_VRINGH_READ,
				      copy->vr_idx, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		/* Issue the write descriptors next */
		ret = mic_vringh_copy(mvdev, wiov, ubuf, len, !MIC_VRINGH_READ,
				      copy->vr_idx, &out_len);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			break;
		}
		len -= out_len;
		ubuf += out_len;
		copy->out_len += out_len;
		if (!len) {
			/* One user space iovec is now completed */
			iovcnt--;
			u_iov++;
		}
		/* Exit loop if all elements in KIOVs have been processed. */
		if (riov->i == riov->used && wiov->i == wiov->used)
			break;
	}
	/*
	 * Update the used ring if a descriptor was available and some data was
	 * copied in/out and the user asked for a used ring update.
	 */
	if (*head != USHRT_MAX && copy->out_len && copy->update_used) {
		u32 total = 0;

		/* Determine the total data consumed */
		total += mic_vringh_iov_consumed(riov);
		total += mic_vringh_iov_consumed(wiov);
		vringh_complete_kern(vrh, *head, total);
		*head = USHRT_MAX;
		if (vringh_need_notify_kern(vrh) > 0)
			vringh_notify(vrh);
		vringh_kiov_cleanup(riov);
		vringh_kiov_cleanup(wiov);
		/* Update avail idx for user space */
		vr->info->avail_idx = vrh->last_avail_idx;
	}
	return ret;
}
static inline int mic_verify_copy_args(struct mic_vdev *mvdev,
		struct mic_copy_desc *copy)
{
	if (copy->vr_idx >= mvdev->dd->num_vq) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}
	return 0;
}
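/*
 * Entry point for descriptor copies initiated from user space; serializes
 * against device reset and removal via the per-vring mutex.
 */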
/* Copy a specified number of virtio descriptors in a chain */
int mic_virtio_copy_desc(struct mic_vdev *mvdev,
		struct mic_copy_desc *copy)
{
	int err;
	struct mic_vringh *mvr = &mvdev->mvr[copy->vr_idx];

	err = mic_verify_copy_args(mvdev, copy);
	if (err)
		return err;

	mutex_lock(&mvr->vr_mutex);
	if (!mic_vdevup(mvdev)) {
		err = -ENODEV;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, err);
		goto err;
	}
	err = _mic_virtio_copy(mvdev, copy);
	if (err)
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, err);
err:
	mutex_unlock(&mvr->vr_mutex);
	return err;
}
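/*
 * Called once the card has published its used ring addresses: point each
 * host side vringh at the used ring inside the card's aperture.
 */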
static void mic_virtio_init_post(struct mic_vdev *mvdev)
{
	struct mic_vqconfig *vqconfig = mic_vq_config(mvdev->dd);
	int i;

	for (i = 0; i < mvdev->dd->num_vq; i++) {
		if (!le64_to_cpu(vqconfig[i].used_address)) {
			dev_warn(mic_dev(mvdev), "used_address zero??\n");
			continue;
		}
		mvdev->mvr[i].vrh.vring.used =
			(void __force *)mvdev->mdev->aper.va +
			le64_to_cpu(vqconfig[i].used_address);
	}

	mvdev->dc->used_address_updated = 0;

	dev_dbg(mic_dev(mvdev), "%s: device type %d LINKUP\n",
		__func__, mvdev->virtio_id);
}
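/*
 * Quiesce all vrings of a device, reset their indices and acknowledge the
 * reset request from the card.
 */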
static inline void mic_virtio_device_reset(struct mic_vdev *mvdev)
{
	int i;

	dev_dbg(mic_dev(mvdev), "%s: status %d device type %d RESET\n",
		__func__, mvdev->dd->status, mvdev->virtio_id);

	for (i = 0; i < mvdev->dd->num_vq; i++)
		/*
		 * Avoid lockdep false positive. The + 1 is for the mic
		 * mutex which is held in the reset devices code path.
		 */
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	/* 0 status means "reset" */
	mvdev->dd->status = 0;
	mvdev->dc->vdev_reset = 0;
	mvdev->dc->host_ack = 1;

	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct vringh *vrh = &mvdev->mvr[i].vrh;
		mvdev->mvr[i].vring.info->avail_idx = 0;
		vrh->completed = 0;
		vrh->last_avail_idx = 0;
		vrh->last_used_idx = 0;
	}

	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
}
void mic_virtio_reset_devices(struct mic_device *mdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *mvdev;

	dev_dbg(mdev->sdev->parent, "%s\n", __func__);

	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		mvdev = list_entry(pos, struct mic_vdev, list);
		mic_virtio_device_reset(mvdev);
		mvdev->poll_wake = 1;
		wake_up(&mvdev->waitq);
	}
}
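/*
 * Bottom half scheduled from the virtio doorbell interrupt: completes
 * device bring-up once used ring addresses are updated, handles reset
 * requests and wakes up any pollers.
 */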
void mic_bh_handler(struct work_struct *work)
{
	struct mic_vdev *mvdev = container_of(work, struct mic_vdev,
			virtio_bh_work);

	if (mvdev->dc->used_address_updated)
		mic_virtio_init_post(mvdev);

	if (mvdev->dc->vdev_reset)
		mic_virtio_device_reset(mvdev);

	mvdev->poll_wake = 1;
	wake_up(&mvdev->waitq);
}
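/* Interrupt handler for the per-device virtio doorbell. */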
static irqreturn_t mic_virtio_intr_handler(int irq, void *data)
{
	struct mic_vdev *mvdev = data;
	struct mic_device *mdev = mvdev->mdev;

	mdev->ops->intr_workarounds(mdev);
	schedule_work(&mvdev->virtio_bh_work);
	return IRQ_HANDLED;
}
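/*
 * Copy new config space contents from user space into the device
 * descriptor and interrupt the card, waiting (with retries) for the guest
 * to acknowledge the configuration change.
 */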
int mic_virtio_config_change(struct mic_vdev *mvdev,
			void __user *argp)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int ret = 0, retry, i;
	struct mic_bootparam *bootparam = mvdev->mdev->dp;
	s8 db = bootparam->h2c_config_db;

	mutex_lock(&mvdev->mdev->mic_mutex);
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_lock_nested(&mvdev->mvr[i].vr_mutex, i + 1);

	if (db == -1 || mvdev->dd->type == -1) {
		ret = -EIO;
		goto exit;
	}

	if (copy_from_user(mic_vq_configspace(mvdev->dd),
			   argp, mvdev->dd->config_len)) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		ret = -EFAULT;
		goto exit;
	}
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_CONFIG_CHANGED;
	mvdev->mdev->ops->send_intr(mvdev->mdev, db);

	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}

	dev_dbg(mic_dev(mvdev),
		"%s %d retry: %d\n", __func__, __LINE__, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
exit:
	for (i = 0; i < mvdev->dd->num_vq; i++)
		mutex_unlock(&mvdev->mvr[i].vr_mutex);
	mutex_unlock(&mvdev->mdev->mic_mutex);
	return ret;
}
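/*
 * Validate a device descriptor passed in from user space and copy it into
 * the first free slot of the device page shared with the card.
 */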
static int mic_copy_dp_entry(struct mic_vdev *mvdev,
					void __user *argp,
					__u8 *type,
					struct mic_device_desc **devpage)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc dd, *dd_config, *devp;
	struct mic_vqconfig *vqconfig;
	int ret = 0, i;
	bool slot_found = false;

	if (copy_from_user(&dd, argp, sizeof(dd))) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EFAULT);
		return -EFAULT;
	}

	if (mic_aligned_desc_size(&dd) > MIC_MAX_DESC_BLK_SIZE ||
	    dd.num_vq > MIC_MAX_VRINGS) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -EINVAL);
		return -EINVAL;
	}

	dd_config = kmalloc(mic_desc_size(&dd), GFP_KERNEL);
	if (dd_config == NULL) {
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, -ENOMEM);
		return -ENOMEM;
	}
	if (copy_from_user(dd_config, argp, mic_desc_size(&dd))) {
		ret = -EFAULT;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}

	vqconfig = mic_vq_config(dd_config);
	for (i = 0; i < dd.num_vq; i++) {
		if (le16_to_cpu(vqconfig[i].num) > MIC_MAX_VRING_ENTRIES) {
			ret = -EINVAL;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto exit;
		}
	}

	/* Find the first free device page entry */
	for (i = sizeof(struct mic_bootparam);
		i < MIC_DP_SIZE - mic_total_desc_size(dd_config);
		i += mic_total_desc_size(devp)) {
		devp = mdev->dp + i;
		if (devp->type == 0 || devp->type == -1) {
			slot_found = true;
			break;
		}
	}
	if (!slot_found) {
		ret = -EINVAL;
		dev_err(mic_dev(mvdev), "%s %d err %d\n",
			__func__, __LINE__, ret);
		goto exit;
	}
	/*
	 * Save off the type before doing the memcpy. Type will be set in the
	 * end after completing all initialization for the new device.
	 */
	*type = dd_config->type;
	dd_config->type = 0;
	memcpy(devp, dd_config, mic_desc_size(dd_config));

	*devpage = devp;
exit:
	kfree(dd_config);
	return ret;
}
static void mic_init_device_ctrl(struct mic_vdev *mvdev,
		struct mic_device_desc *devpage)
{
	struct mic_device_ctrl *dc;

	dc = (void *)devpage + mic_aligned_desc_size(devpage);

	dc->config_change = 0;
	dc->guest_ack = 0;
	dc->vdev_reset = 0;
	dc->host_ack = 0;
	dc->used_address_updated = 0;
	dc->c2h_vdev_db = -1;
	dc->h2c_vdev_db = -1;

	mvdev->dc = dc;
}
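/*
 * Add a new virtio device to the device page: allocate and map vrings and
 * DMA bounce buffers, register the doorbell interrupt, and finally publish
 * the device by writing its type, ordered by a write barrier.
 */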
int mic_virtio_add_device(struct mic_vdev *mvdev,
			void __user *argp)
{
	struct mic_device *mdev = mvdev->mdev;
	struct mic_device_desc *dd = NULL;
	struct mic_vqconfig *vqconfig;
	int vr_size, i, j, ret;
	u8 type = 0;
	s8 db;
	char irqname[10];
	struct mic_bootparam *bootparam = mdev->dp;
	u16 num;
	dma_addr_t vr_addr;

	mutex_lock(&mdev->mic_mutex);

	ret = mic_copy_dp_entry(mvdev, argp, &type, &dd);
	if (ret) {
		mutex_unlock(&mdev->mic_mutex);
		return ret;
	}

	mic_init_device_ctrl(mvdev, dd);

	mvdev->dd = dd;
	mvdev->virtio_id = type;
	vqconfig = mic_vq_config(dd);
	INIT_WORK(&mvdev->virtio_bh_work, mic_bh_handler);

	for (i = 0; i < dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];
		struct mic_vring *vr = &mvdev->mvr[i].vring;
		num = le16_to_cpu(vqconfig[i].num);
		mutex_init(&mvr->vr_mutex);
		vr_size = PAGE_ALIGN(vring_size(num, MIC_VIRTIO_RING_ALIGN) +
			sizeof(struct _mic_vring_info));
		vr->va = (void *)
			__get_free_pages(GFP_KERNEL | __GFP_ZERO,
					 get_order(vr_size));
		if (!vr->va) {
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vr->len = vr_size;
		vr->info = vr->va + vring_size(num, MIC_VIRTIO_RING_ALIGN);
		vr->info->magic = cpu_to_le32(MIC_MAGIC + mvdev->virtio_id + i);
		vr_addr = mic_map_single(mdev, vr->va, vr_size);
		if (mic_map_error(vr_addr)) {
			free_pages((unsigned long)vr->va, get_order(vr_size));
			ret = -ENOMEM;
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vqconfig[i].address = cpu_to_le64(vr_addr);

		vring_init(&vr->vr, num, vr->va, MIC_VIRTIO_RING_ALIGN);
		ret = vringh_init_kern(&mvr->vrh,
			*(u32 *)mic_vq_features(mvdev->dd), num, false,
			vr->vr.desc, vr->vr.avail, vr->vr.used);
		if (ret) {
			dev_err(mic_dev(mvdev), "%s %d err %d\n",
				__func__, __LINE__, ret);
			goto err;
		}
		vringh_kiov_init(&mvr->riov, NULL, 0);
		vringh_kiov_init(&mvr->wiov, NULL, 0);
		mvr->head = USHRT_MAX;
		mvr->mvdev = mvdev;
		mvr->vrh.notify = mic_notify;
		dev_dbg(mdev->sdev->parent,
			"%s %d index %d va %p info %p vr_size 0x%x\n",
			__func__, __LINE__, i, vr->va, vr->info, vr_size);
		mvr->buf = (void *)__get_free_pages(GFP_KERNEL,
					get_order(MIC_INT_DMA_BUF_SIZE));
		mvr->buf_da = mic_map_single(mvdev->mdev, mvr->buf,
					     MIC_INT_DMA_BUF_SIZE);
	}

	snprintf(irqname, sizeof(irqname), "mic%dvirtio%d", mdev->id,
		 mvdev->virtio_id);
	mvdev->virtio_db = mic_next_db(mdev);
	mvdev->virtio_cookie = mic_request_threaded_irq(mdev,
					mic_virtio_intr_handler,
					NULL, irqname, mvdev,
					mvdev->virtio_db, MIC_INTR_DB);
	if (IS_ERR(mvdev->virtio_cookie)) {
		ret = PTR_ERR(mvdev->virtio_cookie);
		dev_dbg(mdev->sdev->parent, "request irq failed\n");
		goto err;
	}

	mvdev->dc->c2h_vdev_db = mvdev->virtio_db;

	list_add_tail(&mvdev->list, &mdev->vdev_list);
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	dd->type = type;

	dev_dbg(mdev->sdev->parent, "Added virtio device id %d\n", dd->type);

	db = bootparam->h2c_config_db;
	if (db != -1)
		mdev->ops->send_intr(mdev, db);
	mutex_unlock(&mdev->mic_mutex);
	return 0;
err:
	vqconfig = mic_vq_config(dd);
	for (j = 0; j < i; j++) {
		struct mic_vringh *mvr = &mvdev->mvr[j];
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[j].address),
				 mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			   get_order(mvr->vring.len));
	}
	mutex_unlock(&mdev->mic_mutex);
	return ret;
}
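/*
 * Hot-remove a virtio device: ask the guest to release it, wait for the
 * acknowledgement, then tear down the interrupt, vrings and DMA mappings
 * and free the device page slot by setting the type to -1.
 */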
void mic_virtio_del_device(struct mic_vdev *mvdev)
{
	struct list_head *pos, *tmp;
	struct mic_vdev *tmp_mvdev;
	struct mic_device *mdev = mvdev->mdev;
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wake);
	int i, ret, retry;
	struct mic_vqconfig *vqconfig;
	struct mic_bootparam *bootparam = mdev->dp;
	s8 db;

	mutex_lock(&mdev->mic_mutex);
	db = bootparam->h2c_config_db;
	if (db == -1)
		goto skip_hot_remove;
	dev_dbg(mdev->sdev->parent,
		"Requesting hot remove id %d\n", mvdev->virtio_id);
	mvdev->dc->config_change = MIC_VIRTIO_PARAM_DEV_REMOVE;
	mdev->ops->send_intr(mdev, db);
	for (retry = 100; retry--;) {
		ret = wait_event_timeout(wake,
			mvdev->dc->guest_ack, msecs_to_jiffies(100));
		if (ret)
			break;
	}
	dev_dbg(mdev->sdev->parent,
		"Device id %d config_change %d guest_ack %d retry %d\n",
		mvdev->virtio_id, mvdev->dc->config_change,
		mvdev->dc->guest_ack, retry);
	mvdev->dc->config_change = 0;
	mvdev->dc->guest_ack = 0;
skip_hot_remove:
	mic_free_irq(mdev, mvdev->virtio_cookie, mvdev);
	flush_work(&mvdev->virtio_bh_work);
	vqconfig = mic_vq_config(mvdev->dd);
	for (i = 0; i < mvdev->dd->num_vq; i++) {
		struct mic_vringh *mvr = &mvdev->mvr[i];

		mic_unmap_single(mvdev->mdev, mvr->buf_da,
				 MIC_INT_DMA_BUF_SIZE);
		free_pages((unsigned long)mvr->buf,
			   get_order(MIC_INT_DMA_BUF_SIZE));
		vringh_kiov_cleanup(&mvr->riov);
		vringh_kiov_cleanup(&mvr->wiov);
		mic_unmap_single(mdev, le64_to_cpu(vqconfig[i].address),
				 mvr->vring.len);
		free_pages((unsigned long)mvr->vring.va,
			   get_order(mvr->vring.len));
	}

	list_for_each_safe(pos, tmp, &mdev->vdev_list) {
		tmp_mvdev = list_entry(pos, struct mic_vdev, list);
		if (tmp_mvdev == mvdev) {
			list_del(pos);
			dev_dbg(mdev->sdev->parent,
				"Removing virtio device id %d\n",
				mvdev->virtio_id);
			break;
		}
	}
	/*
	 * Order the type update with previous stores. This write barrier
	 * is paired with the corresponding read barrier before the uncached
	 * system memory read of the type, on the card while scanning the
	 * device page.
	 */
	smp_wmb();
	mvdev->dd->type = -1;
	mutex_unlock(&mdev->mic_mutex);
}