// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2019 Intel Corporation. All rights rsvd. */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <uapi/linux/idxd.h>
#include "idxd.h"
#include "registers.h"

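/*
 * Allocate a free descriptor from the work queue. With IDXD_OP_BLOCK the
 * caller sleeps until a slot frees up; otherwise contention or a full queue
 * returns -EBUSY or -EAGAIN, and a disabled device returns -EIO.
 */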
struct idxd_desc *idxd_alloc_desc(struct idxd_wq *wq, enum idxd_op_type optype)
{
        struct idxd_desc *desc;
        struct idxd_device *idxd = wq->idxd;
        int idx;

        if (idxd->state != IDXD_DEV_ENABLED)
                return ERR_PTR(-EIO);

        if (optype == IDXD_OP_BLOCK)
                percpu_down_read(&wq->submit_lock);
        else if (!percpu_down_read_trylock(&wq->submit_lock))
                return ERR_PTR(-EBUSY);

        if (!atomic_add_unless(&wq->dq_count, 1, wq->size)) {
                int rc;

                if (optype == IDXD_OP_NONBLOCK) {
                        percpu_up_read(&wq->submit_lock);
                        return ERR_PTR(-EAGAIN);
                }

                /*
                 * Switch to holding the submit lock for write so new
                 * submitters are held off while this task waits for a
                 * descriptor slot to free up.
                 */
                percpu_up_read(&wq->submit_lock);
                percpu_down_write(&wq->submit_lock);
                rc = wait_event_interruptible(wq->submit_waitq,
                                              atomic_add_unless(&wq->dq_count,
                                                                1, wq->size) ||
                                              idxd->state != IDXD_DEV_ENABLED);
                percpu_up_write(&wq->submit_lock);
                if (rc < 0)
                        return ERR_PTR(-EINTR);
                if (idxd->state != IDXD_DEV_ENABLED)
                        return ERR_PTR(-EIO);
        } else {
                percpu_up_read(&wq->submit_lock);
        }

        idx = sbitmap_get(&wq->sbmap, 0, false);
        if (idx < 0) {
                atomic_dec(&wq->dq_count);
                return ERR_PTR(-EAGAIN);
        }

        desc = wq->descs[idx];
        memset(desc->hw, 0, sizeof(struct dsa_hw_desc));
        memset(desc->completion, 0, sizeof(struct dsa_completion_record));
        return desc;
}

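/*
 * Return a descriptor to the work queue and wake up any submitter waiting
 * for a free slot.
 */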
void idxd_free_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
        atomic_dec(&wq->dq_count);
        sbitmap_clear_bit(&wq->sbmap, desc->id);
        wake_up(&wq->submit_waitq);
}

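/*
 * Write a prepared descriptor to the device portal with a 512-bit MMIO
 * write, then track it on the interrupt vector's pending list if a
 * completion interrupt was requested.
 */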
int idxd_submit_desc(struct idxd_wq *wq, struct idxd_desc *desc)
{
        struct idxd_device *idxd = wq->idxd;
        int vec = desc->hw->int_handle;
        void __iomem *portal;

        if (idxd->state != IDXD_DEV_ENABLED)
                return -EIO;

        portal = wq->dportal + idxd_get_wq_portal_offset(IDXD_PORTAL_UNLIMITED);
        /*
         * The wmb() flushes writes to coherent DMA data before possibly
         * triggering a DMA read. The wmb() is necessary even on UP because
         * the recipient is a device.
         */
        wmb();
        iosubmit_cmds512(portal, desc->hw, 1);

        /*
         * If a completion interrupt was requested, pend the descriptor on
         * the lockless list of the irq_entry it was assigned to so the
         * interrupt handler can find it.
         */
        if (desc->hw->flags & IDXD_OP_FLAG_RCI)
                llist_add(&desc->llnode,
                          &idxd->irq_entries[vec].pending_llist);

        return 0;
}
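
/*
 * Illustrative usage sketch (not part of the driver): how a caller might
 * allocate, fill, and submit a descriptor with the helpers above. The
 * dsa_hw_desc fields come from <uapi/linux/idxd.h>; the compl_dma member of
 * struct idxd_desc and the src_dma/dst_dma/len values are assumptions shown
 * here only for illustration.
 *
 *      struct idxd_desc *desc;
 *      int rc;
 *
 *      desc = idxd_alloc_desc(wq, IDXD_OP_BLOCK);
 *      if (IS_ERR(desc))
 *              return PTR_ERR(desc);
 *
 *      desc->hw->opcode = DSA_OPCODE_MEMMOVE;
 *      desc->hw->flags = IDXD_OP_FLAG_CRAV | IDXD_OP_FLAG_RCR;
 *      desc->hw->src_addr = src_dma;
 *      desc->hw->dst_addr = dst_dma;
 *      desc->hw->xfer_size = len;
 *      desc->hw->completion_addr = desc->compl_dma;
 *
 *      rc = idxd_submit_desc(wq, desc);
 *      if (rc < 0)
 *              idxd_free_desc(wq, desc);
 */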