// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/skbuff.h>

#include "ccm.h"
#include "nfp_net.h"
/* CCM messages via the mailbox.  CMSGs get wrapped into simple TLVs
 * and copied into the mailbox.  Multiple messages can be copied to
 * form a batch.  Threads come in with CMSG formed in an skb, then
 * enqueue that skb onto the request queue.  If a thread's skb is first
 * in queue this thread will handle the mailbox operation.  It copies
 * up to 64 messages into the mailbox (making sure that both requests
 * and replies will fit).  After FW is done processing the batch it
 * copies the data out and wakes waiting threads.
 * If a thread is waiting it either gets its message completed
 * (response is copied into the same skb as the request, overwriting
 * it), or becomes the first in queue.
 * Completions and next-to-run are signaled via the control buffer
 * to limit potential cache line bounces.
 */
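/* Illustrative usage sketch only (not part of the driver): a minimal
 * example of how a caller is expected to drive the request/reply API
 * defined further down in this file.  The helper name and the 16/64
 * byte sizes are made up for illustration, and the prototypes are
 * assumed to be visible via "ccm.h"; real callers pass a message type
 * which the FW advertised as supported in tlv_caps.mbox_cmsg_types.
 */
static int __maybe_unused nfp_ccm_mbox_usage_sketch(struct nfp_net *nn,
						    enum nfp_ccm_type type)
{
	struct sk_buff *skb;

	/* Room for a 16 byte request, expecting at most 64 bytes back */
	skb = nfp_ccm_mbox_msg_alloc(nn, 16, 64, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	/* Fill skb->data with the CCM request here... */

	/* Sleeps until the reply overwrites the request in the same skb;
	 * reply_size == 0 means "don't validate the reply length".
	 */
	return nfp_ccm_mbox_communicate(nn, skb, type, 0, 64);
}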
#define NFP_CCM_MBOX_BATCH_LIMIT	64
#define NFP_CCM_TIMEOUT			(NFP_NET_POLL_TIMEOUT * 1000)
#define NFP_CCM_MAX_QLEN		1024
enum nfp_net_mbox_cmsg_state {
	NFP_NET_MBOX_CMSG_STATE_QUEUED,
	NFP_NET_MBOX_CMSG_STATE_NEXT,
	NFP_NET_MBOX_CMSG_STATE_BUSY,
	NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND,
	NFP_NET_MBOX_CMSG_STATE_DONE,
};
/**
 * struct nfp_ccm_mbox_cmsg_cb - CCM mailbox specific info
 * @state:	processing state (/stage) of the message
 * @err:	error encountered during processing if any
 * @max_len:	max(request_len, reply_len)
 * @exp_reply:	expected reply length (0 means don't validate)
 * @posted:	the message was posted and nobody waits for the reply
 */
struct nfp_ccm_mbox_cmsg_cb {
	enum nfp_net_mbox_cmsg_state state;
	int err;
	unsigned int max_len;
	unsigned int exp_reply;
	bool posted;
};
static u32
nfp_ccm_mbox_max_msg(struct nfp_net *nn)
{
	return round_down(nn->tlv_caps.mbox_len, 4) -
		NFP_NET_CFG_MBOX_SIMPLE_VAL - /* common mbox command header */
		4 * 2; /* Msg TLV plus End TLV headers */
}
static void
nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
	cb->err = 0;
	cb->max_len = max_len;
	cb->exp_reply = exp_reply;
	cb->posted = false;
}
static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	return cb->max_len;
}
static bool nfp_ccm_mbox_done(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
}
static bool nfp_ccm_mbox_in_progress(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	return cb->state != NFP_NET_MBOX_CMSG_STATE_QUEUED &&
	       cb->state != NFP_NET_MBOX_CMSG_STATE_NEXT;
}
static void nfp_ccm_mbox_set_busy(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY;
}
static bool nfp_ccm_mbox_is_posted(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	return cb->posted;
}
static void nfp_ccm_mbox_mark_posted(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	cb->posted = true;
}
static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb)
{
	return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
}
static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	return cb->state == NFP_NET_MBOX_CMSG_STATE_NEXT;
}
static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn)
{
	struct nfp_ccm_mbox_cmsg_cb *cb;
	struct sk_buff *skb;

	skb = skb_peek(&nn->mbox_cmsg.queue);
	if (!skb)
		return;

	cb = (void *)skb->cb;
	cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT;
	if (cb->posted)
		queue_work(nn->mbox_cmsg.workq, &nn->mbox_cmsg.runq_work);
}
static void
nfp_ccm_mbox_write_tlv(struct nfp_net *nn, u32 off, u32 type, u32 len)
{
	nn_writel(nn, off,
		  FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) |
		  FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len));
}
/* Copy a batch of queued request skbs into the mailbox as MSG TLVs,
 * reserving extra room where the reply may be longer than the request,
 * and terminate the batch with an END TLV.
 */
static void nfp_ccm_mbox_copy_in(struct nfp_net *nn, struct sk_buff *last)
{
	struct sk_buff *skb;
	int reserve, i, cnt;
	__be32 *data;
	u32 off, len;

	off = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
	skb = __skb_peek(&nn->mbox_cmsg.queue);
	while (true) {
		nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_MSG,
				       skb->len);
		off += 4;

		/* Write data word by word, skb->data should be aligned */
		data = (__be32 *)skb->data;
		cnt = skb->len / 4;
		for (i = 0; i < cnt; i++) {
			nn_writel(nn, off, be32_to_cpu(data[i]));
			off += 4;
		}
		if (skb->len & 3) {
			__be32 tmp = 0;

			memcpy(&tmp, &data[i], skb->len & 3);
			nn_writel(nn, off, be32_to_cpu(tmp));
			off += 4;
		}

		/* Reserve space if reply is bigger */
		len = round_up(skb->len, 4);
		reserve = nfp_ccm_mbox_maxlen(skb) - len;
		if (reserve > 0) {
			nfp_ccm_mbox_write_tlv(nn, off,
					       NFP_NET_MBOX_TLV_TYPE_RESV,
					       reserve);
			off += 4 + reserve;
		}

		if (skb == last)
			break;
		skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
	}

	nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_END, 0);
}
static struct sk_buff *
nfp_ccm_mbox_find_req(struct nfp_net *nn, __be16 tag, struct sk_buff *last)
{
	struct sk_buff *skb;

	skb = __skb_peek(&nn->mbox_cmsg.queue);
	while (true) {
		if (__nfp_ccm_get_tag(skb) == tag)
			return skb;

		if (skb == last)
			return NULL;

		skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
	}
}
/* Walk the TLVs the FW left in the mailbox, match replies to queued
 * requests by tag, copy each reply over its request skb (unless the
 * message was posted) and mark all dequeued messages as done.
 */
static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
{
	struct nfp_ccm_mbox_cmsg_cb *cb;
	u8 __iomem *data, *end;
	struct sk_buff *skb;

	data = nn->dp.ctrl_bar + nn->tlv_caps.mbox_off +
		NFP_NET_CFG_MBOX_SIMPLE_VAL;
	end = data + nn->tlv_caps.mbox_len;

	while (true) {
		unsigned int length, offset, type;
		struct nfp_ccm_hdr hdr;
		u32 tlv_hdr;

		tlv_hdr = readl(data);
		type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr);
		length = FIELD_GET(NFP_NET_MBOX_TLV_LEN, tlv_hdr);
		offset = data - nn->dp.ctrl_bar;

		/* Advance past the header */
		data += 4;

		if (data + length > end) {
			nn_dp_warn(&nn->dp, "mailbox oversized TLV type:%d offset:%u len:%u\n",
				   type, offset, length);
			break;
		}

		if (type == NFP_NET_MBOX_TLV_TYPE_END)
			break;
		if (type == NFP_NET_MBOX_TLV_TYPE_RESV)
			goto next_tlv;
		if (type != NFP_NET_MBOX_TLV_TYPE_MSG &&
		    type != NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
			nn_dp_warn(&nn->dp, "mailbox unknown TLV type:%d offset:%u len:%u\n",
				   type, offset, length);
			break;
		}

		if (length < 4) {
			nn_dp_warn(&nn->dp, "mailbox msg too short to contain header TLV type:%d offset:%u len:%u\n",
				   type, offset, length);
			break;
		}

		hdr.raw = cpu_to_be32(readl(data));

		skb = nfp_ccm_mbox_find_req(nn, hdr.tag, last);
		if (!skb) {
			nn_dp_warn(&nn->dp, "mailbox request not found:%u\n",
				   be16_to_cpu(hdr.tag));
			break;
		}
		cb = (void *)skb->cb;

		if (type == NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
			nn_dp_warn(&nn->dp,
				   "mailbox msg not supported type:%d\n",
				   nfp_ccm_get_type(skb));
			cb->err = -EIO;
			goto next_tlv;
		}

		if (hdr.type != __NFP_CCM_REPLY(nfp_ccm_get_type(skb))) {
			nn_dp_warn(&nn->dp, "mailbox msg reply wrong type:%u expected:%lu\n",
				   hdr.type,
				   __NFP_CCM_REPLY(nfp_ccm_get_type(skb)));
			cb->err = -EIO;
			goto next_tlv;
		}
		if (cb->exp_reply && length != cb->exp_reply) {
			nn_dp_warn(&nn->dp, "mailbox msg reply wrong size type:%u expected:%u have:%u\n",
				   hdr.type, cb->exp_reply, length);
			cb->err = -EIO;
			goto next_tlv;
		}
		if (length > cb->max_len) {
			nn_dp_warn(&nn->dp, "mailbox msg oversized reply type:%u max:%u have:%u\n",
				   hdr.type, cb->max_len, length);
			cb->err = -EIO;
			goto next_tlv;
		}

		if (!cb->posted) {
			__be32 *skb_data;
			int i, cnt;

			if (length <= skb->len)
				__skb_trim(skb, length);
			else
				skb_put(skb, length - skb->len);

			/* We overcopy here slightly, but that's okay,
			 * the skb is large enough, and the garbage will
			 * be ignored (beyond skb->len).
			 */
			skb_data = (__be32 *)skb->data;
			memcpy(skb_data, &hdr, 4);

			cnt = DIV_ROUND_UP(length, 4);
			for (i = 1; i < cnt; i++)
				skb_data[i] = cpu_to_be32(readl(data + i * 4));
		}

		cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND;
next_tlv:
		data += round_up(length, 4);
		if (data + 4 > end) {
			nn_dp_warn(&nn->dp,
				   "reached end of MBOX without END TLV\n");
			break;
		}
	}

	smp_wmb(); /* order the skb->data vs. cb->state */
	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
	do {
		skb = __skb_dequeue(&nn->mbox_cmsg.queue);
		cb = (void *)skb->cb;

		if (cb->state != NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND) {
			cb->err = -ENOENT;
			smp_wmb(); /* order the cb->err vs. cb->state */
		}
		cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;

		if (cb->posted) {
			if (cb->err)
				nn_dp_warn(&nn->dp,
					   "mailbox posted msg failed type:%u err:%d\n",
					   nfp_ccm_get_type(skb), cb->err);
			dev_consume_skb_any(skb);
		}
	} while (skb != last);

	nfp_ccm_mbox_mark_next_runner(nn);

	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
}
static void
nfp_ccm_mbox_mark_all_err(struct nfp_net *nn, struct sk_buff *last, int err)
{
	struct nfp_ccm_mbox_cmsg_cb *cb;
	struct sk_buff *skb;

	spin_lock_bh(&nn->mbox_cmsg.queue.lock);
	do {
		skb = __skb_dequeue(&nn->mbox_cmsg.queue);
		cb = (void *)skb->cb;

		cb->err = err;
		smp_wmb(); /* order the cb->err vs. cb->state */
		cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
	} while (skb != last);

	nfp_ccm_mbox_mark_next_runner(nn);

	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
}
/* Called by the first thread in the queue with the queue lock held;
 * carves off a batch that fits into the mailbox, runs the FW reconfig
 * and distributes the results.
 */
static void nfp_ccm_mbox_run_queue_unlock(struct nfp_net *nn)
	__releases(&nn->mbox_cmsg.queue.lock)
{
	int space = nn->tlv_caps.mbox_len - NFP_NET_CFG_MBOX_SIMPLE_VAL;
	struct sk_buff *skb, *last;
	int cnt, err;

	space -= 4; /* for End TLV */

	/* First skb must fit, because it's ours and we checked it fits */
	cnt = 1;
	last = skb = __skb_peek(&nn->mbox_cmsg.queue);
	space -= 4 + nfp_ccm_mbox_maxlen(skb);

	while (!skb_queue_is_last(&nn->mbox_cmsg.queue, last)) {
		skb = skb_queue_next(&nn->mbox_cmsg.queue, last);
		space -= 4 + nfp_ccm_mbox_maxlen(skb);
		if (space < 0)
			break;
		last = skb;
		nfp_ccm_mbox_set_busy(skb);
		cnt++;
		if (cnt == NFP_CCM_MBOX_BATCH_LIMIT)
			break;
	}

	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

	/* Now we own all skb's marked in progress, new requests may arrive
	 * at the end of the queue.
	 */

	nn_ctrl_bar_lock(nn);

	nfp_ccm_mbox_copy_in(nn, last);

	err = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
	if (!err)
		nfp_ccm_mbox_copy_out(nn, last);
	else
		nfp_ccm_mbox_mark_all_err(nn, last, -EIO);

	nn_ctrl_bar_unlock(nn);

	wake_up_all(&nn->mbox_cmsg.wq);
}
static int nfp_ccm_mbox_skb_return(struct sk_buff *skb)
{
	struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

	if (cb->err)
		dev_kfree_skb_any(skb);
	return cb->err;
}
/* If wait timed out but the command is already in progress we have
 * to wait until it finishes.  Runners have ownership of the skbs marked
 * as busy.
 */
static int
nfp_ccm_mbox_unlink_unlock(struct nfp_net *nn, struct sk_buff *skb,
			   enum nfp_ccm_type type)
	__releases(&nn->mbox_cmsg.queue.lock)
{
	bool was_first;

	if (nfp_ccm_mbox_in_progress(skb)) {
		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

		wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb));
		smp_rmb(); /* pairs with smp_wmb() after data is written */
		return nfp_ccm_mbox_skb_return(skb);
	}

	was_first = nfp_ccm_mbox_should_run(nn, skb);
	__skb_unlink(skb, &nn->mbox_cmsg.queue);
	if (was_first)
		nfp_ccm_mbox_mark_next_runner(nn);

	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

	if (was_first)
		wake_up_all(&nn->mbox_cmsg.wq);

	nn_dp_warn(&nn->dp, "time out waiting for mbox response to 0x%02x\n",
		   type);
	return -ETIMEDOUT;
}
static int
nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
			 enum nfp_ccm_type type,
			 unsigned int reply_size, unsigned int max_reply_size,
			 gfp_t flags)
{
	const unsigned int mbox_max = nfp_ccm_mbox_max_msg(nn);
	unsigned int max_len;
	ssize_t undersize;
	int err;

	if (unlikely(!(nn->tlv_caps.mbox_cmsg_types & BIT(type)))) {
		nn_dp_warn(&nn->dp,
			   "message type %d not supported by mailbox\n", type);
		return -EINVAL;
	}

	/* If the reply size is unknown assume it will take the entire
	 * mailbox, the callers should do their best for this to never
	 * happen.
	 */
	if (!max_reply_size)
		max_reply_size = mbox_max;
	max_reply_size = round_up(max_reply_size, 4);

	/* Make sure we can fit the entire reply into the skb,
	 * and that we don't have to slow down the mbox handler
	 * with allocations.
	 */
	undersize = max_reply_size - (skb_end_pointer(skb) - skb->data);
	if (undersize > 0) {
		err = pskb_expand_head(skb, 0, undersize, flags);
		if (err) {
			nn_dp_warn(&nn->dp,
				   "can't allocate reply buffer for mailbox\n");
			return err;
		}
	}

	/* Make sure that request and response both fit into the mailbox */
	max_len = max(max_reply_size, round_up(skb->len, 4));
	if (max_len > mbox_max) {
		nn_dp_warn(&nn->dp,
			   "message too big for the mailbox: %u/%u vs %u\n",
			   skb->len, max_reply_size, mbox_max);
		return -EMSGSIZE;
	}

	nfp_ccm_mbox_msg_init(skb, reply_size, max_len);

	return 0;
}
static int
nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
			 enum nfp_ccm_type type, bool critical)
{
	struct nfp_ccm_hdr *hdr;

	assert_spin_locked(&nn->mbox_cmsg.queue.lock);

	if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
		nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
		return -EBUSY;
	}

	hdr = (void *)skb->data;
	hdr->ver = NFP_CCM_ABI_VERSION;
	hdr->type = type;
	hdr->tag = cpu_to_be16(nn->mbox_cmsg.tag++);

	__skb_queue_tail(&nn->mbox_cmsg.queue, skb);

	return 0;
}
int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
			       enum nfp_ccm_type type,
			       unsigned int reply_size,
			       unsigned int max_reply_size, bool critical)
{
	int err;

	err = nfp_ccm_mbox_msg_prepare(nn, skb, type, reply_size,
				       max_reply_size, GFP_KERNEL);
	if (err)
		goto err_free_skb;

	spin_lock_bh(&nn->mbox_cmsg.queue.lock);

	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, critical);
	if (err)
		goto err_unlock;

	/* First in queue takes the mailbox lock and processes the batch */
	if (!nfp_ccm_mbox_is_first(nn, skb)) {
		bool to;

		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

		to = !wait_event_timeout(nn->mbox_cmsg.wq,
					 nfp_ccm_mbox_done(skb) ||
					 nfp_ccm_mbox_should_run(nn, skb),
					 msecs_to_jiffies(NFP_CCM_TIMEOUT));

		/* fast path for those completed by another thread */
		if (nfp_ccm_mbox_done(skb)) {
			smp_rmb(); /* pairs with wmb after data is written */
			return nfp_ccm_mbox_skb_return(skb);
		}

		spin_lock_bh(&nn->mbox_cmsg.queue.lock);

		if (!nfp_ccm_mbox_is_first(nn, skb)) {
			WARN_ON(!to);

			err = nfp_ccm_mbox_unlink_unlock(nn, skb, type);
			if (err)
				goto err_free_skb;
			return 0;
		}
	}

	/* run queue expects the lock held */
	nfp_ccm_mbox_run_queue_unlock(nn);
	return nfp_ccm_mbox_skb_return(skb);

err_unlock:
	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
err_free_skb:
	dev_kfree_skb_any(skb);
	return err;
}
int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
			     enum nfp_ccm_type type,
			     unsigned int reply_size,
			     unsigned int max_reply_size)
{
	return __nfp_ccm_mbox_communicate(nn, skb, type, reply_size,
					  max_reply_size, false);
}
static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
{
	struct sk_buff *skb;
	struct nfp_net *nn;

	nn = container_of(work, struct nfp_net, mbox_cmsg.runq_work);

	spin_lock_bh(&nn->mbox_cmsg.queue.lock);

	skb = __skb_peek(&nn->mbox_cmsg.queue);
	if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb) ||
		    !nfp_ccm_mbox_should_run(nn, skb))) {
		spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
		return;
	}

	nfp_ccm_mbox_run_queue_unlock(nn);
}
static void nfp_ccm_mbox_post_wait_work(struct work_struct *work)
{
	struct sk_buff *skb;
	struct nfp_net *nn;
	int err;

	nn = container_of(work, struct nfp_net, mbox_cmsg.wait_work);

	skb = skb_peek(&nn->mbox_cmsg.queue);
	if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb)))
		/* Should never happen so it's unclear what to do here.. */
		goto exit_unlock_wake;

	err = nfp_net_mbox_reconfig_wait_posted(nn);
	if (!err)
		nfp_ccm_mbox_copy_out(nn, skb);
	else
		nfp_ccm_mbox_mark_all_err(nn, skb, -EIO);
exit_unlock_wake:
	nn_ctrl_bar_unlock(nn);
	wake_up_all(&nn->mbox_cmsg.wq);
}
int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
		      enum nfp_ccm_type type, unsigned int max_reply_size)
{
	int err;

	err = nfp_ccm_mbox_msg_prepare(nn, skb, type, 0, max_reply_size,
				       GFP_ATOMIC);
	if (err)
		goto err_free_skb;

	nfp_ccm_mbox_mark_posted(skb);

	spin_lock_bh(&nn->mbox_cmsg.queue.lock);

	err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, false);
	if (err)
		goto err_unlock;

	if (nfp_ccm_mbox_is_first(nn, skb)) {
		if (nn_ctrl_bar_trylock(nn)) {
			nfp_ccm_mbox_copy_in(nn, skb);
			nfp_net_mbox_reconfig_post(nn,
						   NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
			queue_work(nn->mbox_cmsg.workq,
				   &nn->mbox_cmsg.wait_work);
		} else {
			nfp_ccm_mbox_mark_next_runner(nn);
		}
	}

	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

	return 0;

err_unlock:
	spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
err_free_skb:
	dev_kfree_skb_any(skb);
	return err;
}
struct sk_buff *
nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
		       unsigned int reply_size, gfp_t flags)
{
	unsigned int max_size;
	struct sk_buff *skb;

	if (!reply_size)
		max_size = nfp_ccm_mbox_max_msg(nn);
	else
		max_size = max(req_size, reply_size);
	max_size = round_up(max_size, 4);

	skb = alloc_skb(max_size, flags);
	if (!skb)
		return NULL;

	skb_put(skb, req_size);

	return skb;
}
bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size)
{
	return nfp_ccm_mbox_max_msg(nn) >= size;
}
int nfp_ccm_mbox_init(struct nfp_net *nn)
{
	return 0;
}
void nfp_ccm_mbox_clean(struct nfp_net *nn)
{
	drain_workqueue(nn->mbox_cmsg.workq);
}
int nfp_ccm_mbox_alloc(struct nfp_net *nn)
{
	skb_queue_head_init(&nn->mbox_cmsg.queue);
	init_waitqueue_head(&nn->mbox_cmsg.wq);
	INIT_WORK(&nn->mbox_cmsg.wait_work, nfp_ccm_mbox_post_wait_work);
	INIT_WORK(&nn->mbox_cmsg.runq_work, nfp_ccm_mbox_post_runq_work);

	nn->mbox_cmsg.workq = alloc_workqueue("nfp-ccm-mbox", WQ_UNBOUND, 0);
	if (!nn->mbox_cmsg.workq)
		return -ENOMEM;
	return 0;
}
void nfp_ccm_mbox_free(struct nfp_net *nn)
{
	destroy_workqueue(nn->mbox_cmsg.workq);
	WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue));
}