#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/net.h>
#include <linux/io_uring.h>

#include "io_uring.h"
#include "notif.h"
#include "rsrc.h"

static const struct ubuf_info_ops io_ubuf_ops;

static void io_notif_tw_complete(struct io_kiocb *notif, struct io_tw_state *ts)
{
	struct io_notif_data *nd = io_notif_to_data(notif);

	/* complete the whole notification chain from one task_work run */
	do {
		notif = cmd_to_io_kiocb(nd);

		lockdep_assert(refcount_read(&nd->uarg.refcnt) == 0);

		if (unlikely(nd->zc_report) && (nd->zc_copied || !nd->zc_used))
			notif->cqe.res |= IORING_NOTIF_USAGE_ZC_COPIED;

		if (nd->account_pages && notif->ctx->user) {
			__io_unaccount_mem(notif->ctx->user, nd->account_pages);
			nd->account_pages = 0;
		}

		/* grab ->next before completion may free the current notif */
		nd = nd->next;
		io_req_task_complete(notif, ts);
	} while (nd);
}

void io_tx_ubuf_complete(struct sk_buff *skb, struct ubuf_info *uarg,
			 bool success)
{
	struct io_notif_data *nd = container_of(uarg, struct io_notif_data, uarg);
	struct io_kiocb *notif = cmd_to_io_kiocb(nd);
	unsigned tw_flags;

	if (nd->zc_report) {
		if (success && !nd->zc_used && skb)
			WRITE_ONCE(nd->zc_used, true);
		else if (!success && !nd->zc_copied)
			WRITE_ONCE(nd->zc_copied, true);
	}

	if (!refcount_dec_and_test(&uarg->refcnt))
		return;

	/* non-head members of a chain forward their final put to the head */
	if (nd->head != nd) {
		io_tx_ubuf_complete(skb, &nd->head->uarg, success);
		return;
	}

	tw_flags = nd->next ? 0 : IOU_F_TWQ_LAZY_WAKE;
	notif->io_task_work.func = io_notif_tw_complete;
	__io_req_task_work_add(notif, tw_flags);
}

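/*
 * Descriptive note on the chaining that the code below relies on: ->head
 * points at the first notif of a chain (a standalone notif points at
 * itself) and ->next links the rest.  io_link_skb() grows the chain,
 * io_tx_ubuf_complete() hands the final reference drop to the head, and
 * io_notif_tw_complete() then walks the chain so all members are completed
 * from the same task_work run.
 */
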
static int io_link_skb(struct sk_buff *skb, struct ubuf_info *uarg)
{
	struct io_notif_data *nd, *prev_nd;
	struct io_kiocb *prev_notif, *notif;
	struct ubuf_info *prev_uarg = skb_zcopy(skb);

	nd = container_of(uarg, struct io_notif_data, uarg);
	notif = cmd_to_io_kiocb(nd);

	if (!prev_uarg) {
		net_zcopy_get(&nd->uarg);
		skb_zcopy_init(skb, &nd->uarg);
		return 0;
	}
	/* handle it separately as we can't link a notif to itself */
	if (unlikely(prev_uarg == &nd->uarg))
		return 0;
	/* we can't join two links together, just request a fresh skb */
	if (unlikely(nd->head != nd || nd->next))
		return -EEXIST;
	/* don't mix zc providers */
	if (unlikely(prev_uarg->ops != &io_ubuf_ops))
		return -EEXIST;

	prev_nd = container_of(prev_uarg, struct io_notif_data, uarg);
	prev_notif = cmd_to_io_kiocb(prev_nd);

	/* make sure all notifications can be finished in the same task_work */
	if (unlikely(notif->ctx != prev_notif->ctx ||
		     notif->tctx != prev_notif->tctx))
		return -EEXIST;

	nd->head = prev_nd->head;
	nd->next = prev_nd->next;
	prev_nd->next = notif;
	net_zcopy_get(&nd->head->uarg);
	return 0;
}

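/*
 * Descriptive note: these are the callbacks the networking stack invokes on
 * the notif's ubuf_info.  ->complete fires when an skb is done with the
 * user pages (see io_tx_ubuf_complete() above), ->link_skb when the stack
 * wants to attach this notif to an skb that may already carry a ubuf_info.
 */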
static const struct ubuf_info_ops io_ubuf_ops = {
	.complete		= io_tx_ubuf_complete,
	.link_skb		= io_link_skb,
};

struct io_kiocb *io_alloc_notif(struct io_ring_ctx *ctx)
	__must_hold(&ctx->uring_lock)
{
	struct io_kiocb *notif;
	struct io_notif_data *nd;

	if (unlikely(!io_alloc_req(ctx, &notif)))
		return NULL;
	notif->opcode = IORING_OP_NOP;
	notif->flags = 0;
	notif->file = NULL;
	notif->tctx = current->io_uring;
	io_get_task_refs(1);
	notif->file_node = NULL;
	notif->buf_node = NULL;

	nd = io_notif_to_data(notif);
	nd->zc_report = false;
	nd->account_pages = 0;
	nd->next = NULL;
	nd->head = nd;

	nd->uarg.flags = IO_NOTIF_UBUF_FLAGS;
	nd->uarg.ops = &io_ubuf_ops;
	refcount_set(&nd->uarg.refcnt, 1);
	return notif;
}
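
/*
 * Illustrative sketch only (not part of this file): a rough picture of how
 * a caller such as a zero-copy send path might drive the helpers above.
 * example_send_zc() is made up for illustration; io_alloc_notif(),
 * io_notif_to_data() and io_notif_flush() are assumed from notif.h.
 */
#if 0
static int example_send_zc(struct io_ring_ctx *ctx, struct msghdr *msg)
{
	struct io_kiocb *notif;
	struct io_notif_data *nd;

	/* one notif per request; it posts its own CQE once the pages are free */
	notif = io_alloc_notif(ctx);
	if (!notif)
		return -ENOMEM;
	nd = io_notif_to_data(notif);

	/*
	 * Hand the ubuf_info to the net stack: every skb pinning the user
	 * pages takes a reference and calls io_tx_ubuf_complete() on release.
	 */
	msg->msg_ubuf = &nd->uarg;

	/* ... perform the actual ->sendmsg() call here ... */

	/*
	 * Drop the allocation reference; when the net stack drops the last
	 * one, io_notif_tw_complete() runs and completes the notif.
	 */
	io_notif_flush(notif);
	return 0;
}
#endif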