1 // SPDX-License-Identifier: GPL-2.0
/*
 * virtio_pmem.c: Virtio pmem Driver
 *
 * Discovers persistent memory range information
 * from host and provides a virtio based flushing
 * interface.
 */
9 #include "virtio_pmem.h"
12 /* The interrupt handler */
13 void virtio_pmem_host_ack(struct virtqueue
*vq
)
15 struct virtio_pmem
*vpmem
= vq
->vdev
->priv
;
16 struct virtio_pmem_request
*req_data
, *req_buf
;
20 spin_lock_irqsave(&vpmem
->pmem_lock
, flags
);
21 while ((req_data
= virtqueue_get_buf(vq
, &len
)) != NULL
) {
22 req_data
->done
= true;
23 wake_up(&req_data
->host_acked
);
25 if (!list_empty(&vpmem
->req_list
)) {
26 req_buf
= list_first_entry(&vpmem
->req_list
,
27 struct virtio_pmem_request
, list
);
28 req_buf
->wq_buf_avail
= true;
29 wake_up(&req_buf
->wq_buf
);
30 list_del(&req_buf
->list
);
33 spin_unlock_irqrestore(&vpmem
->pmem_lock
, flags
);
35 EXPORT_SYMBOL_GPL(virtio_pmem_host_ack
);
37 /* The request submission function */
38 static int virtio_pmem_flush(struct nd_region
*nd_region
)
40 struct virtio_device
*vdev
= nd_region
->provider_data
;
41 struct virtio_pmem
*vpmem
= vdev
->priv
;
42 struct virtio_pmem_request
*req_data
;
43 struct scatterlist
*sgs
[2], sg
, ret
;
48 req_data
= kmalloc(sizeof(*req_data
), GFP_KERNEL
);
52 req_data
->done
= false;
53 init_waitqueue_head(&req_data
->host_acked
);
54 init_waitqueue_head(&req_data
->wq_buf
);
55 INIT_LIST_HEAD(&req_data
->list
);
56 req_data
->req
.type
= cpu_to_le32(VIRTIO_PMEM_REQ_TYPE_FLUSH
);
57 sg_init_one(&sg
, &req_data
->req
, sizeof(req_data
->req
));
59 sg_init_one(&ret
, &req_data
->resp
.ret
, sizeof(req_data
->resp
));
62 spin_lock_irqsave(&vpmem
->pmem_lock
, flags
);
64 * If virtqueue_add_sgs returns -ENOSPC then req_vq virtual
65 * queue does not have free descriptor. We add the request
66 * to req_list and wait for host_ack to wake us up when free
67 * slots are available.
69 while ((err
= virtqueue_add_sgs(vpmem
->req_vq
, sgs
, 1, 1, req_data
,
70 GFP_ATOMIC
)) == -ENOSPC
) {
72 dev_info(&vdev
->dev
, "failed to send command to virtio pmem device, no free slots in the virtqueue\n");
73 req_data
->wq_buf_avail
= false;
74 list_add_tail(&req_data
->list
, &vpmem
->req_list
);
75 spin_unlock_irqrestore(&vpmem
->pmem_lock
, flags
);
77 /* A host response results in "host_ack" getting called */
78 wait_event(req_data
->wq_buf
, req_data
->wq_buf_avail
);
79 spin_lock_irqsave(&vpmem
->pmem_lock
, flags
);
81 err1
= virtqueue_kick(vpmem
->req_vq
);
82 spin_unlock_irqrestore(&vpmem
->pmem_lock
, flags
);
84 * virtqueue_add_sgs failed with error different than -ENOSPC, we can't
85 * do anything about that.
88 dev_info(&vdev
->dev
, "failed to send command to virtio pmem device\n");
91 /* A host repsonse results in "host_ack" getting called */
92 wait_event(req_data
->host_acked
, req_data
->done
);
93 err
= le32_to_cpu(req_data
->resp
.ret
);
100 /* The asynchronous flush callback function */
101 int async_pmem_flush(struct nd_region
*nd_region
, struct bio
*bio
)
104 * Create child bio for asynchronous flush and chain with
105 * parent bio. Otherwise directly call nd_region flush.
107 if (bio
&& bio
->bi_iter
.bi_sector
!= -1) {
108 struct bio
*child
= bio_alloc(GFP_ATOMIC
, 0);
112 bio_copy_dev(child
, bio
);
113 child
->bi_opf
= REQ_PREFLUSH
;
114 child
->bi_iter
.bi_sector
= -1;
115 bio_chain(child
, bio
);
119 if (virtio_pmem_flush(nd_region
))
124 EXPORT_SYMBOL_GPL(async_pmem_flush
);
125 MODULE_LICENSE("GPL");