// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include "otx_cpt_common.h"
#include "otx_cptpf.h"
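
/* Map a mailbox message opcode to a human-readable name for debug logs */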
static char *get_mbox_opcode_str(int msg_opcode)
{
	char *str = "Unknown";

	switch (msg_opcode) {
	case OTX_CPT_MSG_VF_UP:
		str = "UP";
		break;
	case OTX_CPT_MSG_VF_DOWN:
		str = "DOWN";
		break;
	case OTX_CPT_MSG_READY:
		str = "READY";
		break;
	case OTX_CPT_MSG_QLEN:
		str = "QLEN";
		break;
	case OTX_CPT_MSG_QBIND_GRP:
		str = "QBIND_GRP";
		break;
	case OTX_CPT_MSG_VQ_PRIORITY:
		str = "VQ_PRIORITY";
		break;
	case OTX_CPT_MSG_PF_TYPE:
		str = "PF_TYPE";
		break;
	case OTX_CPT_MSG_ACK:
		str = "ACK";
		break;
	case OTX_CPT_MSG_NACK:
		str = "NACK";
		break;
	}

	return str;
}
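
/*
 * Hex-dump a mailbox message for debugging. A negative vf_id indicates
 * the message came from the PF side rather than from a VF.
 */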
static void dump_mbox_msg(struct otx_cpt_mbox *mbox_msg, int vf_id)
{
	char raw_data_str[OTX_CPT_MAX_MBOX_DATA_STR_SIZE];

	hex_dump_to_buffer(mbox_msg, sizeof(struct otx_cpt_mbox), 16, 8,
			   raw_data_str, OTX_CPT_MAX_MBOX_DATA_STR_SIZE, false);
	if (vf_id >= 0)
		pr_debug("MBOX opcode %s received from VF%d raw_data %s\n",
			 get_mbox_opcode_str(mbox_msg->msg), vf_id,
			 raw_data_str);
	else
		pr_debug("MBOX opcode %s received from PF raw_data %s\n",
			 get_mbox_opcode_str(mbox_msg->msg), raw_data_str);
}
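
/*
 * Send a mailbox message to a VF. Note the write order: the data word
 * goes into mbox(1) first and the opcode into mbox(0) second, because
 * writing mbox(0) is what raises the interrupt on the VF side, so the
 * data must already be in place by then.
 */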
static void otx_cpt_send_msg_to_vf(struct otx_cpt_device *cpt, int vf,
				   struct otx_cpt_mbox *mbx)
{
	/* Writing mbox(0) causes interrupt */
	writeq(mbx->data, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));
	writeq(mbx->msg, cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
}

/*
 * ACKs VF's mailbox message
 * @vf: VF to which ACK is to be sent
 */
static void otx_cpt_mbox_send_ack(struct otx_cpt_device *cpt, int vf,
				  struct otx_cpt_mbox *mbx)
{
	mbx->data = 0ull;
	mbx->msg = OTX_CPT_MSG_ACK;
	otx_cpt_send_msg_to_vf(cpt, vf, mbx);
}

/* NACKs VF's mailbox message to signal that PF cannot complete the action */
static void otx_cptpf_mbox_send_nack(struct otx_cpt_device *cpt, int vf,
				     struct otx_cpt_mbox *mbx)
{
	mbx->data = 0ull;
	mbx->msg = OTX_CPT_MSG_NACK;
	otx_cpt_send_msg_to_vf(cpt, vf, mbx);
}
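
/* Clear the pending mailbox interrupt bit for a VF (write-1-to-clear) */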
static void otx_cpt_clear_mbox_intr(struct otx_cpt_device *cpt, u32 vf)
{
	writeq(1ull << vf, cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
}

/*
 * Configure QLEN/Chunk sizes for VF
 */
static void otx_cpt_cfg_qlen_for_vf(struct otx_cpt_device *cpt, int vf,
				    u32 size)
{
	union otx_cptx_pf_qx_ctl pf_qx_ctl;

	pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
	pf_qx_ctl.s.size = size;
	/* cont_err: let the queue continue servicing after an error */
	pf_qx_ctl.s.cont_err = true;
	writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
}

/*
 * Configure VQ priority
 */
static void otx_cpt_cfg_vq_priority(struct otx_cpt_device *cpt, int vf, u32 pri)
{
	union otx_cptx_pf_qx_ctl pf_qx_ctl;

	pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
	pf_qx_ctl.s.pri = pri;
	writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(vf));
}
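
/*
 * Bind a VF's virtual queue to an engine group. On success, the engine
 * type supported by the group's microcode (OTX_CPT_SE_TYPES or
 * OTX_CPT_AE_TYPES) is returned so the VF learns which kind of engines
 * it was given; BAD_OTX_CPTVF_TYPE or -EINVAL is returned on failure.
 */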
static int otx_cpt_bind_vq_to_grp(struct otx_cpt_device *cpt, u8 q, u8 grp)
{
	struct device *dev = &cpt->pdev->dev;
	struct otx_cpt_eng_grp_info *eng_grp;
	union otx_cptx_pf_qx_ctl pf_qx_ctl;
	struct otx_cpt_ucode *ucode;

	if (q >= cpt->max_vfs) {
		dev_err(dev, "Requested queue %d is > than maximum avail %d\n",
			q, cpt->max_vfs);
		return -EINVAL;
	}

	if (grp >= OTX_CPT_MAX_ENGINE_GROUPS) {
		dev_err(dev, "Requested group %d is > than maximum avail %d\n",
			grp, OTX_CPT_MAX_ENGINE_GROUPS);
		return -EINVAL;
	}

	eng_grp = &cpt->eng_grps.grp[grp];
	if (!eng_grp->is_enabled) {
		dev_err(dev, "Requested engine group %d is disabled\n", grp);
		return -EINVAL;
	}

	pf_qx_ctl.u = readq(cpt->reg_base + OTX_CPT_PF_QX_CTL(q));
	pf_qx_ctl.s.grp = grp;
	writeq(pf_qx_ctl.u, cpt->reg_base + OTX_CPT_PF_QX_CTL(q));

	/* A mirrored group uses the microcode of the group it mirrors */
	if (eng_grp->mirror.is_ena)
		ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
	else
		ucode = &eng_grp->ucode[0];

	if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_SE_TYPES))
		return OTX_CPT_SE_TYPES;
	else if (otx_cpt_uc_supports_eng_type(ucode, OTX_CPT_AE_TYPES))
		return OTX_CPT_AE_TYPES;
	else
		return BAD_OTX_CPTVF_TYPE;
}

/* Interrupt handler to handle mailbox messages from VFs */
static void otx_cpt_handle_mbox_intr(struct otx_cpt_device *cpt, int vf)
{
	int vftype = 0;
	struct otx_cpt_mbox mbx = {};
	struct device *dev = &cpt->pdev->dev;
	/*
	 * MBOX[0] contains msg
	 * MBOX[1] contains data
	 */
	mbx.msg  = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 0));
	mbx.data = readq(cpt->reg_base + OTX_CPT_PF_VFX_MBOXX(vf, 1));

	dump_mbox_msg(&mbx, vf);

	switch (mbx.msg) {
	case OTX_CPT_MSG_VF_UP:
		mbx.msg  = OTX_CPT_MSG_VF_UP;
		mbx.data = cpt->vfs_enabled;
		otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
		break;
	case OTX_CPT_MSG_READY:
		mbx.msg  = OTX_CPT_MSG_READY;
		mbx.data = vf;
		otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
		break;
	case OTX_CPT_MSG_VF_DOWN:
		/* First msg in VF teardown sequence */
		otx_cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	case OTX_CPT_MSG_QLEN:
		otx_cpt_cfg_qlen_for_vf(cpt, vf, mbx.data);
		otx_cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	case OTX_CPT_MSG_QBIND_GRP:
		vftype = otx_cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
		if ((vftype != OTX_CPT_AE_TYPES) &&
		    (vftype != OTX_CPT_SE_TYPES)) {
			dev_err(dev, "VF%d binding to eng group %llu failed\n",
				vf, mbx.data);
			otx_cptpf_mbox_send_nack(cpt, vf, &mbx);
		} else {
			mbx.msg  = OTX_CPT_MSG_QBIND_GRP;
			mbx.data = vftype;
			otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
		}
		break;
	case OTX_CPT_MSG_PF_TYPE:
		mbx.msg  = OTX_CPT_MSG_PF_TYPE;
		mbx.data = cpt->pf_type;
		otx_cpt_send_msg_to_vf(cpt, vf, &mbx);
		break;
	case OTX_CPT_MSG_VQ_PRIORITY:
		otx_cpt_cfg_vq_priority(cpt, vf, mbx.data);
		otx_cpt_mbox_send_ack(cpt, vf, &mbx);
		break;
	default:
		dev_err(&cpt->pdev->dev, "Invalid msg from VF%d, msg 0x%llx\n",
			vf, mbx.msg);
		break;
	}
}
void otx_cpt_mbox_intr_handler(struct otx_cpt_device *cpt, int mbx)
{
	u64 intr;
	u8  vf;

	intr = readq(cpt->reg_base + OTX_CPT_PF_MBOX_INTX(0));
	pr_debug("PF interrupt mbox%d mask 0x%llx\n", mbx, intr);
	for (vf = 0; vf < cpt->max_vfs; vf++) {
		if (intr & (1ULL << vf)) {
			otx_cpt_handle_mbox_intr(cpt, vf);
			otx_cpt_clear_mbox_intr(cpt, vf);
		}
	}
}