1 // SPDX-License-Identifier: GPL-2.0-only
2 /* Copyright (C) 2020 Marvell. */
4 #include "otx2_cpt_common.h"
5 #include "otx2_cptpf.h"
/* Fastpath ipsec opcode with inplace processing */
#define CPT_INLINE_RX_OPCODE (0x26 | (1 << 6))
#define CN10K_CPT_INLINE_RX_OPCODE (0x29 | (1 << 6))

/*
 * Pick the inline-RX IPsec opcode for the device generation.
 * Statement expression so the macro yields a value and evaluates
 * @pdev exactly once.
 */
#define cpt_inline_rx_opcode(pdev)                     \
({                                                     \
	u8 opcode;                                     \
	if (is_dev_otx2(pdev))                         \
		opcode = CPT_INLINE_RX_OPCODE;         \
	else                                           \
		opcode = CN10K_CPT_INLINE_RX_OPCODE;   \
	(opcode);                                      \
})

/*
 * CPT PF driver version. It will be incremented by 1 for every feature
 * addition in CPT mailbox messages.
 */
#define OTX2_CPT_PF_DRV_VERSION 0x1
28 static int forward_to_af(struct otx2_cptpf_dev
*cptpf
,
29 struct otx2_cptvf_info
*vf
,
30 struct mbox_msghdr
*req
, int size
)
32 struct mbox_msghdr
*msg
;
35 mutex_lock(&cptpf
->lock
);
36 msg
= otx2_mbox_alloc_msg(&cptpf
->afpf_mbox
, 0, size
);
38 mutex_unlock(&cptpf
->lock
);
42 memcpy((uint8_t *)msg
+ sizeof(struct mbox_msghdr
),
43 (uint8_t *)req
+ sizeof(struct mbox_msghdr
), size
);
45 msg
->pcifunc
= req
->pcifunc
;
49 ret
= otx2_cpt_sync_mbox_msg(&cptpf
->afpf_mbox
);
50 /* Error code -EIO indicate there is a communication failure
51 * to the AF. Rest of the error codes indicate that AF processed
52 * VF messages and set the error codes in response messages
53 * (if any) so simply forward responses to VF.
56 dev_warn(&cptpf
->pdev
->dev
,
57 "AF not responding to VF%d messages\n", vf
->vf_id
);
58 mutex_unlock(&cptpf
->lock
);
61 mutex_unlock(&cptpf
->lock
);
65 static int handle_msg_get_caps(struct otx2_cptpf_dev
*cptpf
,
66 struct otx2_cptvf_info
*vf
,
67 struct mbox_msghdr
*req
)
69 struct otx2_cpt_caps_rsp
*rsp
;
71 rsp
= (struct otx2_cpt_caps_rsp
*)
72 otx2_mbox_alloc_msg(&cptpf
->vfpf_mbox
, vf
->vf_id
,
77 rsp
->hdr
.id
= MBOX_MSG_GET_CAPS
;
78 rsp
->hdr
.sig
= OTX2_MBOX_RSP_SIG
;
79 rsp
->hdr
.pcifunc
= req
->pcifunc
;
80 rsp
->cpt_pf_drv_version
= OTX2_CPT_PF_DRV_VERSION
;
81 rsp
->cpt_revision
= cptpf
->eng_grps
.rid
;
82 memcpy(&rsp
->eng_caps
, &cptpf
->eng_caps
, sizeof(rsp
->eng_caps
));
87 static int handle_msg_get_eng_grp_num(struct otx2_cptpf_dev
*cptpf
,
88 struct otx2_cptvf_info
*vf
,
89 struct mbox_msghdr
*req
)
91 struct otx2_cpt_egrp_num_msg
*grp_req
;
92 struct otx2_cpt_egrp_num_rsp
*rsp
;
94 grp_req
= (struct otx2_cpt_egrp_num_msg
*)req
;
95 rsp
= (struct otx2_cpt_egrp_num_rsp
*)
96 otx2_mbox_alloc_msg(&cptpf
->vfpf_mbox
, vf
->vf_id
, sizeof(*rsp
));
100 rsp
->hdr
.id
= MBOX_MSG_GET_ENG_GRP_NUM
;
101 rsp
->hdr
.sig
= OTX2_MBOX_RSP_SIG
;
102 rsp
->hdr
.pcifunc
= req
->pcifunc
;
103 rsp
->eng_type
= grp_req
->eng_type
;
104 rsp
->eng_grp_num
= otx2_cpt_get_eng_grp(&cptpf
->eng_grps
,
110 static int handle_msg_kvf_limits(struct otx2_cptpf_dev
*cptpf
,
111 struct otx2_cptvf_info
*vf
,
112 struct mbox_msghdr
*req
)
114 struct otx2_cpt_kvf_limits_rsp
*rsp
;
116 rsp
= (struct otx2_cpt_kvf_limits_rsp
*)
117 otx2_mbox_alloc_msg(&cptpf
->vfpf_mbox
, vf
->vf_id
, sizeof(*rsp
));
121 rsp
->hdr
.id
= MBOX_MSG_GET_KVF_LIMITS
;
122 rsp
->hdr
.sig
= OTX2_MBOX_RSP_SIG
;
123 rsp
->hdr
.pcifunc
= req
->pcifunc
;
124 rsp
->kvf_limits
= cptpf
->kvf_limits
;
129 static int send_inline_ipsec_inbound_msg(struct otx2_cptpf_dev
*cptpf
,
130 int sso_pf_func
, u8 slot
)
132 struct cpt_inline_ipsec_cfg_msg
*req
;
133 struct pci_dev
*pdev
= cptpf
->pdev
;
135 req
= (struct cpt_inline_ipsec_cfg_msg
*)
136 otx2_mbox_alloc_msg_rsp(&cptpf
->afpf_mbox
, 0,
137 sizeof(*req
), sizeof(struct msg_rsp
));
139 dev_err(&pdev
->dev
, "RVU MBOX failed to get message.\n");
142 memset(req
, 0, sizeof(*req
));
143 req
->hdr
.id
= MBOX_MSG_CPT_INLINE_IPSEC_CFG
;
144 req
->hdr
.sig
= OTX2_MBOX_REQ_SIG
;
145 req
->hdr
.pcifunc
= OTX2_CPT_RVU_PFFUNC(cptpf
->pf_id
, 0);
146 req
->dir
= CPT_INLINE_INBOUND
;
148 req
->sso_pf_func_ovrd
= cptpf
->sso_pf_func_ovrd
;
149 req
->sso_pf_func
= sso_pf_func
;
152 return otx2_cpt_send_mbox_msg(&cptpf
->afpf_mbox
, pdev
);
155 static int rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev
*cptpf
, u8 egrp
,
156 struct otx2_cpt_rx_inline_lf_cfg
*req
)
158 struct nix_inline_ipsec_cfg
*nix_req
;
159 struct pci_dev
*pdev
= cptpf
->pdev
;
162 nix_req
= (struct nix_inline_ipsec_cfg
*)
163 otx2_mbox_alloc_msg_rsp(&cptpf
->afpf_mbox
, 0,
165 sizeof(struct msg_rsp
));
166 if (nix_req
== NULL
) {
167 dev_err(&pdev
->dev
, "RVU MBOX failed to get message.\n");
170 memset(nix_req
, 0, sizeof(*nix_req
));
171 nix_req
->hdr
.id
= MBOX_MSG_NIX_INLINE_IPSEC_CFG
;
172 nix_req
->hdr
.sig
= OTX2_MBOX_REQ_SIG
;
174 nix_req
->credit_th
= req
->credit_th
;
175 nix_req
->bpid
= req
->bpid
;
176 if (!req
->credit
|| req
->credit
> OTX2_CPT_INST_QLEN_MSGS
)
177 nix_req
->cpt_credit
= OTX2_CPT_INST_QLEN_MSGS
- 1;
179 nix_req
->cpt_credit
= req
->credit
- 1;
180 nix_req
->gen_cfg
.egrp
= egrp
;
182 nix_req
->gen_cfg
.opcode
= req
->opcode
;
184 nix_req
->gen_cfg
.opcode
= cpt_inline_rx_opcode(pdev
);
185 nix_req
->gen_cfg
.param1
= req
->param1
;
186 nix_req
->gen_cfg
.param2
= req
->param2
;
187 nix_req
->inst_qsel
.cpt_pf_func
= OTX2_CPT_RVU_PFFUNC(cptpf
->pf_id
, 0);
188 nix_req
->inst_qsel
.cpt_slot
= 0;
189 ret
= otx2_cpt_send_mbox_msg(&cptpf
->afpf_mbox
, pdev
);
193 if (cptpf
->has_cpt1
) {
194 ret
= send_inline_ipsec_inbound_msg(cptpf
, req
->sso_pf_func
, 1);
199 return send_inline_ipsec_inbound_msg(cptpf
, req
->sso_pf_func
, 0);
203 otx2_inline_cptlf_setup(struct otx2_cptpf_dev
*cptpf
,
204 struct otx2_cptlfs_info
*lfs
, u8 egrp
, int num_lfs
)
208 ret
= otx2_cptlf_init(lfs
, 1 << egrp
, OTX2_CPT_QUEUE_HI_PRIO
, 1);
210 dev_err(&cptpf
->pdev
->dev
,
211 "LF configuration failed for RX inline ipsec.\n");
215 /* Get msix offsets for attached LFs */
216 ret
= otx2_cpt_msix_offset_msg(lfs
);
220 /* Register for CPT LF Misc interrupts */
221 ret
= otx2_cptlf_register_misc_interrupts(lfs
);
227 otx2_cptlf_unregister_misc_interrupts(lfs
);
229 otx2_cptlf_shutdown(lfs
);
/* Tear down inline-IPsec LFs: drop misc interrupts, then shut the LFs down. */
static void
otx2_inline_cptlf_cleanup(struct otx2_cptlfs_info *lfs)
{
	/* Unregister misc interrupt */
	otx2_cptlf_unregister_misc_interrupts(lfs);

	/* Cleanup LFs */
	otx2_cptlf_shutdown(lfs);
}
243 static int handle_msg_rx_inline_ipsec_lf_cfg(struct otx2_cptpf_dev
*cptpf
,
244 struct mbox_msghdr
*req
)
246 struct otx2_cpt_rx_inline_lf_cfg
*cfg_req
;
247 int num_lfs
= 1, ret
;
250 cfg_req
= (struct otx2_cpt_rx_inline_lf_cfg
*)req
;
251 if (cptpf
->lfs
.lfs_num
) {
252 dev_err(&cptpf
->pdev
->dev
,
253 "LF is already configured for RX inline ipsec.\n");
257 * Allow LFs to execute requests destined to only grp IE_TYPES and
258 * set queue priority of each LF to high
260 egrp
= otx2_cpt_get_eng_grp(&cptpf
->eng_grps
, OTX2_CPT_IE_TYPES
);
261 if (egrp
== OTX2_CPT_INVALID_CRYPTO_ENG_GRP
) {
262 dev_err(&cptpf
->pdev
->dev
,
263 "Engine group for inline ipsec is not available\n");
267 otx2_cptlf_set_dev_info(&cptpf
->lfs
, cptpf
->pdev
, cptpf
->reg_base
,
268 &cptpf
->afpf_mbox
, BLKADDR_CPT0
);
269 cptpf
->lfs
.global_slot
= 0;
270 cptpf
->lfs
.ctx_ilen_ovrd
= cfg_req
->ctx_ilen_valid
;
271 cptpf
->lfs
.ctx_ilen
= cfg_req
->ctx_ilen
;
273 ret
= otx2_inline_cptlf_setup(cptpf
, &cptpf
->lfs
, egrp
, num_lfs
);
275 dev_err(&cptpf
->pdev
->dev
, "Inline-Ipsec CPT0 LF setup failed.\n");
279 if (cptpf
->has_cpt1
) {
280 cptpf
->rsrc_req_blkaddr
= BLKADDR_CPT1
;
281 otx2_cptlf_set_dev_info(&cptpf
->cpt1_lfs
, cptpf
->pdev
,
282 cptpf
->reg_base
, &cptpf
->afpf_mbox
,
284 cptpf
->cpt1_lfs
.global_slot
= num_lfs
;
285 cptpf
->cpt1_lfs
.ctx_ilen_ovrd
= cfg_req
->ctx_ilen_valid
;
286 cptpf
->cpt1_lfs
.ctx_ilen
= cfg_req
->ctx_ilen
;
287 ret
= otx2_inline_cptlf_setup(cptpf
, &cptpf
->cpt1_lfs
, egrp
,
290 dev_err(&cptpf
->pdev
->dev
, "Inline CPT1 LF setup failed.\n");
293 cptpf
->rsrc_req_blkaddr
= 0;
296 ret
= rx_inline_ipsec_lf_cfg(cptpf
, egrp
, cfg_req
);
303 otx2_inline_cptlf_cleanup(&cptpf
->cpt1_lfs
);
305 otx2_inline_cptlf_cleanup(&cptpf
->lfs
);
309 static int cptpf_handle_vf_req(struct otx2_cptpf_dev
*cptpf
,
310 struct otx2_cptvf_info
*vf
,
311 struct mbox_msghdr
*req
, int size
)
315 /* Check if msg is valid, if not reply with an invalid msg */
316 if (req
->sig
!= OTX2_MBOX_REQ_SIG
)
320 case MBOX_MSG_GET_ENG_GRP_NUM
:
321 err
= handle_msg_get_eng_grp_num(cptpf
, vf
, req
);
323 case MBOX_MSG_GET_CAPS
:
324 err
= handle_msg_get_caps(cptpf
, vf
, req
);
326 case MBOX_MSG_GET_KVF_LIMITS
:
327 err
= handle_msg_kvf_limits(cptpf
, vf
, req
);
329 case MBOX_MSG_RX_INLINE_IPSEC_LF_CFG
:
330 err
= handle_msg_rx_inline_ipsec_lf_cfg(cptpf
, req
);
334 err
= forward_to_af(cptpf
, vf
, req
, size
);
340 otx2_reply_invalid_msg(&cptpf
->vfpf_mbox
, vf
->vf_id
, 0, req
->id
);
341 otx2_mbox_msg_send(&cptpf
->vfpf_mbox
, vf
->vf_id
);
345 irqreturn_t
otx2_cptpf_vfpf_mbox_intr(int __always_unused irq
, void *arg
)
347 struct otx2_cptpf_dev
*cptpf
= arg
;
348 struct otx2_cptvf_info
*vf
;
353 * Check which VF has raised an interrupt and schedule
354 * corresponding work queue to process the messages
356 for (i
= 0; i
< 2; i
++) {
357 /* Read the interrupt bits */
358 intr
= otx2_cpt_read64(cptpf
->reg_base
, BLKADDR_RVUM
, 0,
359 RVU_PF_VFPF_MBOX_INTX(i
));
361 for (vf_idx
= i
* 64; vf_idx
< cptpf
->enabled_vfs
; vf_idx
++) {
362 vf
= &cptpf
->vf
[vf_idx
];
363 if (intr
& (1ULL << vf
->intr_idx
)) {
364 queue_work(cptpf
->vfpf_mbox_wq
,
365 &vf
->vfpf_mbox_work
);
366 /* Clear the interrupt */
367 otx2_cpt_write64(cptpf
->reg_base
, BLKADDR_RVUM
,
368 0, RVU_PF_VFPF_MBOX_INTX(i
),
369 BIT_ULL(vf
->intr_idx
));
376 void otx2_cptpf_vfpf_mbox_handler(struct work_struct
*work
)
378 struct otx2_cptpf_dev
*cptpf
;
379 struct otx2_cptvf_info
*vf
;
380 struct otx2_mbox_dev
*mdev
;
381 struct mbox_hdr
*req_hdr
;
382 struct mbox_msghdr
*msg
;
383 struct otx2_mbox
*mbox
;
386 vf
= container_of(work
, struct otx2_cptvf_info
, vfpf_mbox_work
);
388 mbox
= &cptpf
->vfpf_mbox
;
389 /* sync with mbox memory region */
391 mdev
= &mbox
->dev
[vf
->vf_id
];
392 /* Process received mbox messages */
393 req_hdr
= (struct mbox_hdr
*)(mdev
->mbase
+ mbox
->rx_start
);
394 offset
= mbox
->rx_start
+ ALIGN(sizeof(*req_hdr
), MBOX_MSG_ALIGN
);
396 for (i
= 0; i
< req_hdr
->num_msgs
; i
++) {
397 msg
= (struct mbox_msghdr
*)(mdev
->mbase
+ offset
);
399 /* Set which VF sent this message based on mbox IRQ */
400 msg
->pcifunc
= ((u16
)cptpf
->pf_id
<< RVU_PFVF_PF_SHIFT
) |
401 ((vf
->vf_id
+ 1) & RVU_PFVF_FUNC_MASK
);
403 err
= cptpf_handle_vf_req(cptpf
, vf
, msg
,
404 msg
->next_msgoff
- offset
);
406 * Behave as the AF, drop the msg if there is
407 * no memory, timeout handling also goes here
409 if (err
== -ENOMEM
|| err
== -EIO
)
411 offset
= msg
->next_msgoff
;
412 /* Write barrier required for VF responses which are handled by
413 * PF driver and not forwarded to AF.
417 /* Send mbox responses to VF */
419 otx2_mbox_msg_send(mbox
, vf
->vf_id
);
422 irqreturn_t
otx2_cptpf_afpf_mbox_intr(int __always_unused irq
, void *arg
)
424 struct otx2_cptpf_dev
*cptpf
= arg
;
425 struct otx2_mbox_dev
*mdev
;
426 struct otx2_mbox
*mbox
;
427 struct mbox_hdr
*hdr
;
430 /* Read the interrupt bits */
431 intr
= otx2_cpt_read64(cptpf
->reg_base
, BLKADDR_RVUM
, 0, RVU_PF_INT
);
434 mbox
= &cptpf
->afpf_mbox
;
435 mdev
= &mbox
->dev
[0];
436 hdr
= mdev
->mbase
+ mbox
->rx_start
;
438 /* Schedule work queue function to process the MBOX request */
439 queue_work(cptpf
->afpf_mbox_wq
, &cptpf
->afpf_mbox_work
);
441 mbox
= &cptpf
->afpf_mbox_up
;
442 mdev
= &mbox
->dev
[0];
443 hdr
= mdev
->mbase
+ mbox
->rx_start
;
445 /* Schedule work queue function to process the MBOX request */
446 queue_work(cptpf
->afpf_mbox_wq
, &cptpf
->afpf_mbox_up_work
);
447 /* Clear and ack the interrupt */
448 otx2_cpt_write64(cptpf
->reg_base
, BLKADDR_RVUM
, 0, RVU_PF_INT
,
454 static void process_afpf_mbox_msg(struct otx2_cptpf_dev
*cptpf
,
455 struct mbox_msghdr
*msg
)
457 struct otx2_cptlfs_info
*lfs
= &cptpf
->lfs
;
458 struct device
*dev
= &cptpf
->pdev
->dev
;
459 struct cpt_rd_wr_reg_msg
*rsp_rd_wr
;
460 struct msix_offset_rsp
*rsp_msix
;
463 if (msg
->id
>= MBOX_MSG_MAX
) {
464 dev_err(dev
, "MBOX msg with unknown ID %d\n", msg
->id
);
467 if (msg
->sig
!= OTX2_MBOX_RSP_SIG
) {
468 dev_err(dev
, "MBOX msg with wrong signature %x, ID %d\n",
472 if (cptpf
->rsrc_req_blkaddr
== BLKADDR_CPT1
)
473 lfs
= &cptpf
->cpt1_lfs
;
477 cptpf
->pf_id
= (msg
->pcifunc
>> RVU_PFVF_PF_SHIFT
) &
480 case MBOX_MSG_MSIX_OFFSET
:
481 rsp_msix
= (struct msix_offset_rsp
*) msg
;
482 for (i
= 0; i
< rsp_msix
->cptlfs
; i
++)
483 lfs
->lf
[i
].msix_offset
= rsp_msix
->cptlf_msixoff
[i
];
485 for (i
= 0; i
< rsp_msix
->cpt1_lfs
; i
++)
486 lfs
->lf
[i
].msix_offset
= rsp_msix
->cpt1_lf_msixoff
[i
];
488 case MBOX_MSG_CPT_RD_WR_REGISTER
:
489 rsp_rd_wr
= (struct cpt_rd_wr_reg_msg
*)msg
;
491 dev_err(dev
, "Reg %llx rd/wr(%d) failed %d\n",
492 rsp_rd_wr
->reg_offset
, rsp_rd_wr
->is_write
,
496 if (!rsp_rd_wr
->is_write
)
497 *rsp_rd_wr
->ret_val
= rsp_rd_wr
->val
;
499 case MBOX_MSG_ATTACH_RESOURCES
:
501 lfs
->are_lfs_attached
= 1;
503 case MBOX_MSG_DETACH_RESOURCES
:
505 lfs
->are_lfs_attached
= 0;
507 case MBOX_MSG_CPT_INLINE_IPSEC_CFG
:
508 case MBOX_MSG_NIX_INLINE_IPSEC_CFG
:
509 case MBOX_MSG_CPT_LF_RESET
:
514 "Unsupported msg %d received.\n", msg
->id
);
519 static void forward_to_vf(struct otx2_cptpf_dev
*cptpf
, struct mbox_msghdr
*msg
,
522 struct otx2_mbox
*vfpf_mbox
;
523 struct mbox_msghdr
*fwd
;
525 if (msg
->id
>= MBOX_MSG_MAX
) {
526 dev_err(&cptpf
->pdev
->dev
,
527 "MBOX msg with unknown ID %d\n", msg
->id
);
530 if (msg
->sig
!= OTX2_MBOX_RSP_SIG
) {
531 dev_err(&cptpf
->pdev
->dev
,
532 "MBOX msg with wrong signature %x, ID %d\n",
536 vfpf_mbox
= &cptpf
->vfpf_mbox
;
538 if (vf_id
>= cptpf
->enabled_vfs
) {
539 dev_err(&cptpf
->pdev
->dev
,
540 "MBOX msg to unknown VF: %d >= %d\n",
541 vf_id
, cptpf
->enabled_vfs
);
544 if (msg
->id
== MBOX_MSG_VF_FLR
)
547 fwd
= otx2_mbox_alloc_msg(vfpf_mbox
, vf_id
, size
);
549 dev_err(&cptpf
->pdev
->dev
,
550 "Forwarding to VF%d failed.\n", vf_id
);
553 memcpy((uint8_t *)fwd
+ sizeof(struct mbox_msghdr
),
554 (uint8_t *)msg
+ sizeof(struct mbox_msghdr
), size
);
556 fwd
->pcifunc
= msg
->pcifunc
;
562 /* Handle mailbox messages received from AF */
563 void otx2_cptpf_afpf_mbox_handler(struct work_struct
*work
)
565 struct otx2_cptpf_dev
*cptpf
;
566 struct otx2_mbox
*afpf_mbox
;
567 struct otx2_mbox_dev
*mdev
;
568 struct mbox_hdr
*rsp_hdr
;
569 struct mbox_msghdr
*msg
;
570 int offset
, vf_id
, i
;
572 cptpf
= container_of(work
, struct otx2_cptpf_dev
, afpf_mbox_work
);
573 afpf_mbox
= &cptpf
->afpf_mbox
;
574 mdev
= &afpf_mbox
->dev
[0];
575 /* Sync mbox data into memory */
578 rsp_hdr
= (struct mbox_hdr
*)(mdev
->mbase
+ afpf_mbox
->rx_start
);
579 offset
= ALIGN(sizeof(*rsp_hdr
), MBOX_MSG_ALIGN
);
581 for (i
= 0; i
< rsp_hdr
->num_msgs
; i
++) {
582 msg
= (struct mbox_msghdr
*)(mdev
->mbase
+ afpf_mbox
->rx_start
+
584 vf_id
= (msg
->pcifunc
>> RVU_PFVF_FUNC_SHIFT
) &
587 forward_to_vf(cptpf
, msg
, vf_id
,
588 msg
->next_msgoff
- offset
);
590 process_afpf_mbox_msg(cptpf
, msg
);
592 offset
= msg
->next_msgoff
;
593 /* Sync VF response ready to be sent */
597 otx2_mbox_reset(afpf_mbox
, 0);
600 static void handle_msg_cpt_inst_lmtst(struct otx2_cptpf_dev
*cptpf
,
601 struct mbox_msghdr
*msg
)
603 struct cpt_inst_lmtst_req
*req
= (struct cpt_inst_lmtst_req
*)msg
;
604 struct otx2_cptlfs_info
*lfs
= &cptpf
->lfs
;
607 if (cptpf
->lfs
.lfs_num
)
608 lfs
->ops
->send_cmd((union otx2_cpt_inst_s
*)req
->inst
, 1,
611 rsp
= (struct msg_rsp
*)otx2_mbox_alloc_msg(&cptpf
->afpf_mbox_up
, 0,
616 rsp
->hdr
.id
= msg
->id
;
617 rsp
->hdr
.sig
= OTX2_MBOX_RSP_SIG
;
618 rsp
->hdr
.pcifunc
= 0;
622 static void process_afpf_mbox_up_msg(struct otx2_cptpf_dev
*cptpf
,
623 struct mbox_msghdr
*msg
)
625 if (msg
->id
>= MBOX_MSG_MAX
) {
626 dev_err(&cptpf
->pdev
->dev
,
627 "MBOX msg with unknown ID %d\n", msg
->id
);
632 case MBOX_MSG_CPT_INST_LMTST
:
633 handle_msg_cpt_inst_lmtst(cptpf
, msg
);
636 otx2_reply_invalid_msg(&cptpf
->afpf_mbox_up
, 0, 0, msg
->id
);
640 void otx2_cptpf_afpf_mbox_up_handler(struct work_struct
*work
)
642 struct otx2_cptpf_dev
*cptpf
;
643 struct otx2_mbox_dev
*mdev
;
644 struct mbox_hdr
*rsp_hdr
;
645 struct mbox_msghdr
*msg
;
646 struct otx2_mbox
*mbox
;
649 cptpf
= container_of(work
, struct otx2_cptpf_dev
, afpf_mbox_up_work
);
650 mbox
= &cptpf
->afpf_mbox_up
;
651 mdev
= &mbox
->dev
[0];
652 /* Sync mbox data into memory */
655 rsp_hdr
= (struct mbox_hdr
*)(mdev
->mbase
+ mbox
->rx_start
);
656 offset
= mbox
->rx_start
+ ALIGN(sizeof(*rsp_hdr
), MBOX_MSG_ALIGN
);
658 for (i
= 0; i
< rsp_hdr
->num_msgs
; i
++) {
659 msg
= (struct mbox_msghdr
*)(mdev
->mbase
+ offset
);
661 process_afpf_mbox_up_msg(cptpf
, msg
);
663 offset
= mbox
->rx_start
+ msg
->next_msgoff
;
665 otx2_mbox_msg_send(mbox
, 0);