/*
 * Copyright (c) 2016 Hisilicon Limited.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/platform_device.h>
#include "hns_roce_common.h"
#include "hns_roce_device.h"
#include "hns_roce_eq.h"
static void eq_set_cons_index(struct hns_roce_eq *eq, int req_not)
{
	roce_raw_write((eq->cons_index & CONS_INDEX_MASK) |
		       (req_not << eq->log_entries), eq->doorbell);
	/* Memory barrier */
	mb();
}
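
/*
 * The doorbell value written by eq_set_cons_index() packs the consumer
 * index into the low bits (CONS_INDEX_MASK) and places the notification
 * request flag just above the index field, at bit eq->log_entries.  As an
 * example, for a 4096-entry EQ (log_entries = 12), cons_index 0x35 with
 * req_not = 1 is written as 0x35 | (1 << 12) = 0x1035.
 */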
static struct hns_roce_aeqe *get_aeqe(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			    HNS_ROCE_AEQ_ENTRY_SIZE;

	return (struct hns_roce_aeqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}
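
/*
 * The EQ buffer is kept as an array of HNS_ROCE_BA_SIZE chunks in
 * eq->buf_list[]: off / HNS_ROCE_BA_SIZE selects the chunk and
 * off % HNS_ROCE_BA_SIZE is the byte offset inside it.  In practice
 * hns_roce_create_eq() rejects queues larger than one chunk, so only
 * buf_list[0] is ever dereferenced here.
 */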
static struct hns_roce_aeqe *next_aeqe_sw(struct hns_roce_eq *eq)
{
	struct hns_roce_aeqe *aeqe = get_aeqe(eq, eq->cons_index);

	return (roce_get_bit(aeqe->asyn, HNS_ROCE_AEQE_U32_4_OWNER_S) ^
		!!(eq->cons_index & eq->entries)) ? aeqe : NULL;
}
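
/*
 * Ownership check: the consumer index is allowed to run from 0 up to
 * 2 * entries - 1 before wrapping, so (cons_index & eq->entries) gives the
 * software pass parity.  The hardware presumably flips the AEQE owner bit
 * on every pass through the ring; an entry belongs to software only while
 * the two parities differ, which is what the XOR above evaluates.
 */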
static void hns_roce_wq_catas_err_handle(struct hns_roce_dev *hr_dev,
					 struct hns_roce_aeqe *aeqe, int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	dev_warn(dev, "Local Work Queue Catastrophic Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LWQCE_QPC_ERROR:
		dev_warn(dev, "QP %d, QPC error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_MTU_ERROR:
		dev_warn(dev, "QP %d, MTU error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_BA_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE BA addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_WQE_ADDR_ERROR:
		dev_warn(dev, "QP %d, WQE addr error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SQ_WQE_SHIFT_ERROR:
		dev_warn(dev, "QP %d, WQE shift error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_SL_ERROR:
		dev_warn(dev, "QP %d, SL error.\n", qpn);
		break;
	case HNS_ROCE_LWQCE_PORT_ERROR:
		dev_warn(dev, "QP %d, port error.\n", qpn);
		break;
	default:
		break;
	}
}
static void hns_roce_local_wq_access_err_handle(struct hns_roce_dev *hr_dev,
						struct hns_roce_aeqe *aeqe,
						int qpn)
{
	struct device *dev = &hr_dev->pdev->dev;

	dev_warn(dev, "Local Access Violation Work Queue Error.\n");
	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_LAVWQE_R_KEY_VIOLATION:
		dev_warn(dev, "QP %d, R_key violation.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_LENGTH_ERROR:
		dev_warn(dev, "QP %d, length error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_VA_ERROR:
		dev_warn(dev, "QP %d, VA error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_PD_ERROR:
		dev_err(dev, "QP %d, PD error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_RW_ACC_ERROR:
		dev_warn(dev, "QP %d, rw acc error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_KEY_STATE_ERROR:
		dev_warn(dev, "QP %d, key state error.\n", qpn);
		break;
	case HNS_ROCE_LAVWQE_MR_OPERATION_ERROR:
		dev_warn(dev, "QP %d, MR operation error.\n", qpn);
		break;
	default:
		break;
	}
}
static void hns_roce_qp_err_handle(struct hns_roce_dev *hr_dev,
				   struct hns_roce_aeqe *aeqe,
				   int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	int phy_port;
	int qpn;

	qpn = roce_get_field(aeqe->event.qp_event.qp,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_M,
			     HNS_ROCE_AEQE_EVENT_QP_EVENT_QP_QPN_S);
	phy_port = roce_get_field(aeqe->event.qp_event.qp,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_M,
				  HNS_ROCE_AEQE_EVENT_QP_EVENT_PORT_NUM_S);
	/* Only the special QPs (QP0/QP1) get remapped to a per-port QPN */
	if (qpn <= 1)
		qpn = HNS_ROCE_MAX_PORTS * qpn + phy_port;

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		dev_warn(dev, "Invalid Req Local Work Queue Error.\n"
			 "QP %d, phy_port %d.\n", qpn, phy_port);
		break;
	case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		hns_roce_wq_catas_err_handle(hr_dev, aeqe, qpn);
		break;
	case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
		hns_roce_local_wq_access_err_handle(hr_dev, aeqe, qpn);
		break;
	default:
		break;
	}

	hns_roce_qp_event(hr_dev, qpn, event_type);
}
static void hns_roce_cq_err_handle(struct hns_roce_dev *hr_dev,
				   struct hns_roce_aeqe *aeqe,
				   int event_type)
{
	struct device *dev = &hr_dev->pdev->dev;
	u32 cqn;

	cqn = le32_to_cpu(roce_get_field(aeqe->event.cq_event.cq,
					 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_M,
					 HNS_ROCE_AEQE_EVENT_CQ_EVENT_CQ_CQN_S));

	switch (event_type) {
	case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		dev_warn(dev, "CQ 0x%x access err.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		dev_warn(dev, "CQ 0x%x overflow.\n", cqn);
		break;
	case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
		dev_warn(dev, "CQ 0x%x ID invalid.\n", cqn);
		break;
	default:
		break;
	}

	hns_roce_cq_event(hr_dev, cqn, event_type);
}
static void hns_roce_db_overflow_handle(struct hns_roce_dev *hr_dev,
					struct hns_roce_aeqe *aeqe)
{
	struct device *dev = &hr_dev->pdev->dev;

	switch (roce_get_field(aeqe->asyn, HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_M,
			       HNS_ROCE_AEQE_U32_4_EVENT_SUB_TYPE_S)) {
	case HNS_ROCE_DB_SUBTYPE_SDB_OVF:
		dev_warn(dev, "SDB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_OVF:
		dev_warn(dev, "SDB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_SDB_ALM_EMP:
		dev_warn(dev, "SDB almost empty.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_OVF:
		dev_warn(dev, "ODB overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_OVF:
		dev_warn(dev, "ODB almost overflow.\n");
		break;
	case HNS_ROCE_DB_SUBTYPE_ODB_ALM_EMP:
		dev_warn(dev, "ODB almost empty.\n");
		break;
	default:
		break;
	}
}
static int hns_roce_aeq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_aeqe *aeqe;
	int aeqes_found = 0;
	int event_type;

	while ((aeqe = next_aeqe_sw(eq))) {
		dev_dbg(dev, "aeqe = %p, aeqe->asyn.event_type = 0x%lx\n", aeqe,
			roce_get_field(aeqe->asyn,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
				       HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S));
		/* Memory barrier */
		rmb();

		event_type = roce_get_field(aeqe->asyn,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_M,
					    HNS_ROCE_AEQE_U32_4_EVENT_TYPE_S);
		switch (event_type) {
		case HNS_ROCE_EVENT_TYPE_PATH_MIG:
			dev_warn(dev, "PATH MIG not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_COMM_EST:
			dev_warn(dev, "COMMUNICATION established\n");
			break;
		case HNS_ROCE_EVENT_TYPE_SQ_DRAINED:
			dev_warn(dev, "SQ DRAINED not supported\n");
			break;
		case HNS_ROCE_EVENT_TYPE_PATH_MIG_FAILED:
			dev_warn(dev, "PATH MIG failed\n");
			break;
		case HNS_ROCE_EVENT_TYPE_INV_REQ_LOCAL_WQ_ERROR:
		case HNS_ROCE_EVENT_TYPE_WQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_LOCAL_WQ_ACCESS_ERROR:
			hns_roce_qp_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_SRQ_LIMIT_REACH:
		case HNS_ROCE_EVENT_TYPE_SRQ_CATAS_ERROR:
		case HNS_ROCE_EVENT_TYPE_SRQ_LAST_WQE_REACH:
			dev_warn(dev, "SRQ not supported!\n");
			break;
		case HNS_ROCE_EVENT_TYPE_CQ_ACCESS_ERROR:
		case HNS_ROCE_EVENT_TYPE_CQ_OVERFLOW:
		case HNS_ROCE_EVENT_TYPE_CQ_ID_INVALID:
			hns_roce_cq_err_handle(hr_dev, aeqe, event_type);
			break;
		case HNS_ROCE_EVENT_TYPE_PORT_CHANGE:
			dev_warn(dev, "port change.\n");
			break;
		case HNS_ROCE_EVENT_TYPE_MB:
			hns_roce_cmd_event(hr_dev,
					   le16_to_cpu(aeqe->event.cmd.token),
					   aeqe->event.cmd.status,
					   le64_to_cpu(aeqe->event.cmd.out_param));
			break;
		case HNS_ROCE_EVENT_TYPE_DB_OVERFLOW:
			hns_roce_db_overflow_handle(hr_dev, aeqe);
			break;
		case HNS_ROCE_EVENT_TYPE_CEQ_OVERFLOW:
			dev_warn(dev, "CEQ 0x%lx overflow.\n",
				 roce_get_field(aeqe->event.ce_event.ceqe,
					HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_M,
					HNS_ROCE_AEQE_EVENT_CE_EVENT_CEQE_CEQN_S));
			break;
		default:
			dev_warn(dev, "Unhandled event %d on EQ %d at index %u\n",
				 event_type, eq->eqn, eq->cons_index);
			break;
		}

		eq->cons_index++;
		aeqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.aeqe_depth - 1) {
			dev_warn(dev, "cons_index overflow, set back to zero\n");
			eq->cons_index = 0;
		}
	}

	eq_set_cons_index(eq, 0);

	return aeqes_found;
}
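
/*
 * After the loop drains the AEQ, the final consumer index is pushed to
 * hardware through eq_set_cons_index() with the notification flag cleared.
 * The return value only reports whether at least one AEQE was handled, so
 * hns_roce_msi_x_interrupt() can translate it into IRQ_HANDLED or IRQ_NONE
 * via IRQ_RETVAL().
 */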
static struct hns_roce_ceqe *get_ceqe(struct hns_roce_eq *eq, u32 entry)
{
	unsigned long off = (entry & (eq->entries - 1)) *
			    HNS_ROCE_CEQ_ENTRY_SIZE;

	return (struct hns_roce_ceqe *)((u8 *)
		(eq->buf_list[off / HNS_ROCE_BA_SIZE].buf) +
		off % HNS_ROCE_BA_SIZE);
}
static struct hns_roce_ceqe *next_ceqe_sw(struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe = get_ceqe(eq, eq->cons_index);

	return (!!(roce_get_bit(ceqe->ceqe.comp,
				HNS_ROCE_CEQE_CEQE_COMP_OWNER_S)) ^
		!!(eq->cons_index & eq->entries)) ? ceqe : NULL;
}
static int hns_roce_ceq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	struct hns_roce_ceqe *ceqe;
	int ceqes_found = 0;
	u32 cqn;

	while ((ceqe = next_ceqe_sw(eq))) {
		/* Memory barrier */
		rmb();
		cqn = roce_get_field(ceqe->ceqe.comp,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_M,
				     HNS_ROCE_CEQE_CEQE_COMP_CQN_S);
		hns_roce_cq_completion(hr_dev, cqn);

		++eq->cons_index;
		ceqes_found = 1;

		if (eq->cons_index > 2 * hr_dev->caps.ceqe_depth[eq->eqn] - 1) {
			dev_warn(&eq->hr_dev->pdev->dev,
				 "cons_index overflow, set back to zero\n");
			eq->cons_index = 0;
		}
	}

	eq_set_cons_index(eq, 0);

	return ceqes_found;
}
static int hns_roce_aeq_ovf_int(struct hns_roce_dev *hr_dev,
				struct hns_roce_eq *eq)
{
	struct device *dev = &eq->hr_dev->pdev->dev;
	int int_work = 0;
	u32 caepaemask_val;
	u32 aeshift_val;
	u32 caepaest_val;
	u32 ceshift_val;
	u32 cealmovf_val;
	u32 cemask_val;
	int i;

	/*
	 * AEQ overflow, ECC multi-bit error and CEQ almost-overflow alarms
	 * must be cleared: mask the irq, clear the irq, then cancel the mask.
	 */
	aeshift_val = roce_read(hr_dev, ROCEE_CAEP_AEQC_AEQE_SHIFT_REG);

	if (roce_get_bit(aeshift_val,
		ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQ_ALM_OVF_INT_ST_S) == 1) {
		dev_warn(dev, "AEQ overflow!\n");
		int_work = 1;

		/* Set mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_ENABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);

		/* Clear int state(INT_WC : write 1 clear) */
		caepaest_val = roce_read(hr_dev, ROCEE_CAEP_AE_ST_REG);
		roce_set_bit(caepaest_val,
			     ROCEE_CAEP_AE_ST_CAEP_AEQ_ALM_OVF_S, 1);
		roce_write(hr_dev, ROCEE_CAEP_AE_ST_REG, caepaest_val);

		/* Clear mask */
		caepaemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
		roce_set_bit(caepaemask_val,
			     ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
			     HNS_ROCE_INT_MASK_DISABLE);
		roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, caepaemask_val);
	}

	/* CEQ almost overflow */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		ceshift_val = roce_read(hr_dev, ROCEE_CAEP_CEQC_SHIFT_0_REG +
					i * CEQ_REG_OFFSET);

		if (roce_get_bit(ceshift_val,
			ROCEE_CAEP_CEQC_SHIFT_CAEP_CEQ_ALM_OVF_INT_ST_S) == 1) {
			dev_warn(dev, "CEQ[%d] almost overflow!\n", i);
			int_work = 1;

			/* Set mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_ENABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);

			/* Clear int state(INT_WC : write 1 clear) */
			cealmovf_val = roce_read(hr_dev,
						 ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
						 i * CEQ_REG_OFFSET);
			roce_set_bit(cealmovf_val,
				     ROCEE_CAEP_CEQ_ALM_OVF_CAEP_CEQ_ALM_OVF_S,
				     1);
			roce_write(hr_dev, ROCEE_CAEP_CEQ_ALM_OVF_0_REG +
				   i * CEQ_REG_OFFSET, cealmovf_val);

			/* Clear mask */
			cemask_val = roce_read(hr_dev,
					       ROCEE_CAEP_CE_IRQ_MASK_0_REG +
					       i * CEQ_REG_OFFSET);
			roce_set_bit(cemask_val,
				ROCEE_CAEP_CE_IRQ_MASK_CAEP_CEQ_ALM_OVF_MASK_S,
				HNS_ROCE_INT_MASK_DISABLE);
			roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
				   i * CEQ_REG_OFFSET, cemask_val);
		}
	}

	/* ECC multi-bit error alarm */
	dev_warn(dev, "ECC UCERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_UCERR_ALM2_REG));

	dev_warn(dev, "ECC CERR ALARM: 0x%x, 0x%x, 0x%x\n",
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM0_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM1_REG),
		 roce_read(hr_dev, ROCEE_ECC_CERR_ALM2_REG));

	return int_work;
}
static int hns_roce_eq_int(struct hns_roce_dev *hr_dev, struct hns_roce_eq *eq)
{
	int eqes_found = 0;

	if (likely(eq->type_flag == HNS_ROCE_CEQ))
		/* CEQ irq routine, CEQ is pulse irq, not clear */
		eqes_found = hns_roce_ceq_int(hr_dev, eq);
	else if (likely(eq->type_flag == HNS_ROCE_AEQ))
		/* AEQ irq routine, AEQ is pulse irq, not clear */
		eqes_found = hns_roce_aeq_int(hr_dev, eq);
	else
		/* AEQ queue overflow irq */
		eqes_found = hns_roce_aeq_ovf_int(hr_dev, eq);

	return eqes_found;
}
static irqreturn_t hns_roce_msi_x_interrupt(int irq, void *eq_ptr)
{
	struct hns_roce_eq *eq = eq_ptr;
	struct hns_roce_dev *hr_dev = eq->hr_dev;
	int int_work;

	int_work = hns_roce_eq_int(hr_dev, eq);

	return IRQ_RETVAL(int_work);
}
static void hns_roce_enable_eq(struct hns_roce_dev *hr_dev, int eq_num,
			       int enable_flag)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq_num];
	u32 val;

	val = readl(eqc);

	if (enable_flag)
		roce_set_field(val,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_VALID);
	else
		roce_set_field(val,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
			       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
			       HNS_ROCE_EQ_STAT_INVALID);
	writel(val, eqc);
}
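
/*
 * Enabling or disabling an EQ only toggles the STATE field in the first
 * word of its context between HNS_ROCE_EQ_STAT_VALID and
 * HNS_ROCE_EQ_STAT_INVALID.  The AEQC field masks are applied to CEQ
 * contexts as well, which suggests both context types share the layout of
 * this state field.
 */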
static int hns_roce_create_eq(struct hns_roce_dev *hr_dev,
			      struct hns_roce_eq *eq)
{
	void __iomem *eqc = hr_dev->eq_table.eqc_base[eq->eqn];
	struct device *dev = &hr_dev->pdev->dev;
	dma_addr_t tmp_dma_addr;
	u32 eqconsindx_val = 0;
	u32 eqcuridx_val = 0;
	u32 eqshift_val = 0;
	int num_bas;
	int ret;
	int i;

	num_bas = (PAGE_ALIGN(eq->entries * eq->eqe_size) +
		   HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;

	if ((eq->entries * eq->eqe_size) > HNS_ROCE_BA_SIZE) {
		dev_err(dev, "[error]eq buf %d gt ba size(%d) need bas=%d\n",
			(eq->entries * eq->eqe_size), HNS_ROCE_BA_SIZE,
			num_bas);
		return -EINVAL;
	}

	eq->buf_list = kcalloc(num_bas, sizeof(*eq->buf_list), GFP_KERNEL);
	if (!eq->buf_list)
		return -ENOMEM;

	for (i = 0; i < num_bas; ++i) {
		eq->buf_list[i].buf = dma_alloc_coherent(dev, HNS_ROCE_BA_SIZE,
							 &tmp_dma_addr,
							 GFP_KERNEL);
		if (!eq->buf_list[i].buf) {
			ret = -ENOMEM;
			goto err_out_free_pages;
		}

		eq->buf_list[i].map = tmp_dma_addr;
		memset(eq->buf_list[i].buf, 0, HNS_ROCE_BA_SIZE);
	}

	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_STATE_S,
		       HNS_ROCE_EQ_STAT_INVALID);
	roce_set_field(eqshift_val,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_M,
		       ROCEE_CAEP_AEQC_AEQE_SHIFT_CAEP_AEQC_AEQE_SHIFT_S,
		       eq->log_entries);
	writel(eqshift_val, eqc);

	/* Configure eq extended address 12~44bit */
	writel((u32)(eq->buf_list[0].map >> 12), (u8 *)eqc + 4);

	/*
	 * Configure eq extended address 45~49 bit.
	 * 44 = 32 + 12. When passing the address to hardware, shift by 12
	 * because 4K pages are used, and shift by a further 32 to obtain
	 * the high 32-bit value handed to the hardware.
	 */
	roce_set_field(eqcuridx_val, ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQ_BT_H_S,
		       eq->buf_list[0].map >> 44);
	roce_set_field(eqcuridx_val,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_M,
		       ROCEE_CAEP_AEQE_CUR_IDX_CAEP_AEQE_CUR_IDX_S, 0);
	writel(eqcuridx_val, (u8 *)eqc + 8);

	/* Configure eq consumer index */
	roce_set_field(eqconsindx_val,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_M,
		       ROCEE_CAEP_AEQE_CONS_IDX_CAEP_AEQE_CONS_IDX_S, 0);
	writel(eqconsindx_val, (u8 *)eqc + 0xc);

	return 0;

err_out_free_pages:
	for (i = i - 1; i >= 0; i--)
		dma_free_coherent(dev, HNS_ROCE_BA_SIZE, eq->buf_list[i].buf,
				  eq->buf_list[i].map);

	kfree(eq->buf_list);
	return ret;
}
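
/*
 * Worked example of the base-address split above, for a hypothetical DMA
 * address 0x1234_5678_9000: the word at eqc + 4 receives
 * (u32)(map >> 12) = 0x23456789 (address bits 12..43), and the
 * CAEP_AEQ_BT_H field in the word at eqc + 8 receives map >> 44 = 0x1,
 * covering the remaining high bits.  Only buf_list[0].map is programmed,
 * which is consistent with the single-chunk size check at the top of the
 * function.
 */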
static void hns_roce_free_eq(struct hns_roce_dev *hr_dev,
			     struct hns_roce_eq *eq)
{
	int npages = (PAGE_ALIGN(eq->eqe_size * eq->entries) +
		      HNS_ROCE_BA_SIZE - 1) / HNS_ROCE_BA_SIZE;
	int i;

	if (!eq->buf_list)
		return;

	for (i = 0; i < npages; ++i)
		dma_free_coherent(&hr_dev->pdev->dev, HNS_ROCE_BA_SIZE,
				  eq->buf_list[i].buf, eq->buf_list[i].map);

	kfree(eq->buf_list);
}
static void hns_roce_int_mask_en(struct hns_roce_dev *hr_dev)
{
	u32 aemask_val;
	int masken = 0;
	int i;

	/* AEQ INT */
	aemask_val = roce_read(hr_dev, ROCEE_CAEP_AE_MASK_REG);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AEQ_ALM_OVF_MASK_S,
		     masken);
	roce_set_bit(aemask_val, ROCEE_CAEP_AE_MASK_CAEP_AE_IRQ_MASK_S, masken);
	roce_write(hr_dev, ROCEE_CAEP_AE_MASK_REG, aemask_val);

	/* CEQ INT */
	for (i = 0; i < hr_dev->caps.num_comp_vectors; i++) {
		/* IRQ mask */
		roce_write(hr_dev, ROCEE_CAEP_CE_IRQ_MASK_0_REG +
			   i * CEQ_REG_OFFSET, masken);
	}
}
static void hns_roce_ce_int_default_cfg(struct hns_roce_dev *hr_dev)
{
	/* Configure ce int interval */
	roce_write(hr_dev, ROCEE_CAEP_CE_INTERVAL_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_INTERVAL);

	/* Configure ce int burst num */
	roce_write(hr_dev, ROCEE_CAEP_CE_BURST_NUM_CFG_REG,
		   HNS_ROCE_CEQ_DEFAULT_BURST_NUM);
}
int hns_roce_init_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	struct device *dev = &hr_dev->pdev->dev;
	struct hns_roce_eq *eq = NULL;
	int eq_num;
	int ret;
	int i;
	int j;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	eq_table->eq = kcalloc(eq_num, sizeof(*eq_table->eq), GFP_KERNEL);
	if (!eq_table->eq)
		return -ENOMEM;

	eq_table->eqc_base = kcalloc(eq_num, sizeof(*eq_table->eqc_base),
				     GFP_KERNEL);
	if (!eq_table->eqc_base) {
		ret = -ENOMEM;
		goto err_eqc_base_alloc_fail;
	}

	for (i = 0; i < eq_num; i++) {
		eq = &eq_table->eq[i];
		eq->hr_dev = hr_dev;
		eq->eqn = i;
		eq->irq = hr_dev->irq[i];
		eq->log_page_size = PAGE_SHIFT;

		if (i < hr_dev->caps.num_comp_vectors) {
			/* CEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_CEQC_SHIFT_0_REG +
						HNS_ROCE_CEQC_REG_OFFSET * i;
			eq->type_flag = HNS_ROCE_CEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_CEQC_CONS_IDX_0_REG +
				       HNS_ROCE_CEQC_REG_OFFSET * i;
			eq->entries = hr_dev->caps.ceqe_depth[i];
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = sizeof(struct hns_roce_ceqe);
		} else {
			/* AEQ */
			eq_table->eqc_base[i] = hr_dev->reg_base +
						ROCEE_CAEP_AEQC_AEQE_SHIFT_REG;
			eq->type_flag = HNS_ROCE_AEQ;
			eq->doorbell = hr_dev->reg_base +
				       ROCEE_CAEP_AEQE_CONS_IDX_REG;
			eq->entries = hr_dev->caps.aeqe_depth;
			eq->log_entries = ilog2(eq->entries);
			eq->eqe_size = sizeof(struct hns_roce_aeqe);
		}
	}

	hns_roce_int_mask_en(hr_dev);

	/* Configure CE irq interval and burst num */
	hns_roce_ce_int_default_cfg(hr_dev);

	for (i = 0; i < eq_num; i++) {
		ret = hns_roce_create_eq(hr_dev, &eq_table->eq[i]);
		if (ret) {
			dev_err(dev, "eq create failed\n");
			goto err_create_eq_fail;
		}
	}

	for (j = 0; j < eq_num; j++) {
		ret = request_irq(eq_table->eq[j].irq, hns_roce_msi_x_interrupt,
				  0, hr_dev->irq_names[j], eq_table->eq + j);
		if (ret) {
			dev_err(dev, "request irq error!\n");
			goto err_request_irq_fail;
		}
	}

	for (i = 0; i < eq_num; i++)
		hns_roce_enable_eq(hr_dev, i, EQ_ENABLE);

	return 0;

err_request_irq_fail:
	for (j = j - 1; j >= 0; j--)
		free_irq(eq_table->eq[j].irq, eq_table->eq + j);

err_create_eq_fail:
	for (i = i - 1; i >= 0; i--)
		hns_roce_free_eq(hr_dev, &eq_table->eq[i]);

	kfree(eq_table->eqc_base);

err_eqc_base_alloc_fail:
	kfree(eq_table->eq);

	return ret;
}
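
/*
 * EQ layout produced above: indices 0 .. num_comp_vectors - 1 are
 * completion EQs, each with its own CEQC register block and doorbell at an
 * HNS_ROCE_CEQC_REG_OFFSET stride; the remaining num_aeq_vectors entries
 * are asynchronous EQs using the single AEQC register block.  The same
 * index selects the matching hr_dev->irq[] line and hr_dev->irq_names[]
 * entry used by request_irq().
 */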
void hns_roce_cleanup_eq_table(struct hns_roce_dev *hr_dev)
{
	struct hns_roce_eq_table *eq_table = &hr_dev->eq_table;
	int eq_num;
	int i;

	eq_num = hr_dev->caps.num_comp_vectors + hr_dev->caps.num_aeq_vectors;
	for (i = 0; i < eq_num; i++) {
		/* Disable EQ */
		hns_roce_enable_eq(hr_dev, i, EQ_DISABLE);

		free_irq(eq_table->eq[i].irq, eq_table->eq + i);

		hns_roce_free_eq(hr_dev, &eq_table->eq[i]);
	}

	kfree(eq_table->eqc_base);
	kfree(eq_table->eq);
}