1 // SPDX-License-Identifier: GPL-2.0-only
/*
 * Huawei HiNIC PCI Express Linux driver
 * Copyright(c) 2017 Huawei Technologies Co., Ltd
 */
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/semaphore.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/err.h>

#include "hinic_hw_dev.h"
#include "hinic_hw_if.h"
#include "hinic_hw_eqs.h"
#include "hinic_hw_wqe.h"
#include "hinic_hw_wq.h"
#include "hinic_hw_cmdq.h"
#include "hinic_hw_qp_ctxt.h"
#include "hinic_hw_qp.h"
#include "hinic_hw_io.h"
/* size of one completion-index entry in the CI table */
#define CI_Q_ADDR_SIZE                  sizeof(u32)

/* address of queue q_id's CI entry within the CI table at base_addr */
#define CI_ADDR(base_addr, q_id) ((base_addr) + \
				  (q_id) * CI_Q_ADDR_SIZE)

/* total CI table size for num_qps queue pairs */
#define CI_TABLE_SIZE(num_qps)  ((num_qps) * CI_Q_ADDR_SIZE)

/* index of a doorbell area, derived from its offset from the DB base */
#define DB_IDX(db, db_base)     \
	(((unsigned long)(db) - (unsigned long)(db_base)) / HINIC_DB_PAGE_SIZE)

/* encode a WQ page size (in bytes) into the HW's log2-of-4K-units format */
#define HINIC_PAGE_SIZE_HW(pg_size)     ((u8)ilog2((u32)((pg_size) >> 12)))
/* cmdq commands issued by the IO layer */
enum io_cmd {
	IO_CMD_MODIFY_QUEUE_CTXT = 0,
	IO_CMD_CLEAN_QUEUE_CTXT,
};
45 static void init_db_area_idx(struct hinic_free_db_area
*free_db_area
)
49 for (i
= 0; i
< HINIC_DB_MAX_AREAS
; i
++)
50 free_db_area
->db_idx
[i
] = i
;
52 free_db_area
->alloc_pos
= 0;
53 free_db_area
->return_pos
= HINIC_DB_MAX_AREAS
;
55 free_db_area
->num_free
= HINIC_DB_MAX_AREAS
;
57 sema_init(&free_db_area
->idx_lock
, 1);
60 static void __iomem
*get_db_area(struct hinic_func_to_io
*func_to_io
)
62 struct hinic_free_db_area
*free_db_area
= &func_to_io
->free_db_area
;
65 down(&free_db_area
->idx_lock
);
67 free_db_area
->num_free
--;
69 if (free_db_area
->num_free
< 0) {
70 free_db_area
->num_free
++;
71 up(&free_db_area
->idx_lock
);
72 return ERR_PTR(-ENOMEM
);
75 pos
= free_db_area
->alloc_pos
++;
76 pos
&= HINIC_DB_MAX_AREAS
- 1;
78 idx
= free_db_area
->db_idx
[pos
];
80 free_db_area
->db_idx
[pos
] = -1;
82 up(&free_db_area
->idx_lock
);
84 return func_to_io
->db_base
+ idx
* HINIC_DB_PAGE_SIZE
;
87 static void return_db_area(struct hinic_func_to_io
*func_to_io
,
88 void __iomem
*db_base
)
90 struct hinic_free_db_area
*free_db_area
= &func_to_io
->free_db_area
;
91 int pos
, idx
= DB_IDX(db_base
, func_to_io
->db_base
);
93 down(&free_db_area
->idx_lock
);
95 pos
= free_db_area
->return_pos
++;
96 pos
&= HINIC_DB_MAX_AREAS
- 1;
98 free_db_area
->db_idx
[pos
] = idx
;
100 free_db_area
->num_free
++;
102 up(&free_db_area
->idx_lock
);
105 static int write_sq_ctxts(struct hinic_func_to_io
*func_to_io
, u16 base_qpn
,
108 struct hinic_hwif
*hwif
= func_to_io
->hwif
;
109 struct hinic_sq_ctxt_block
*sq_ctxt_block
;
110 struct pci_dev
*pdev
= hwif
->pdev
;
111 struct hinic_cmdq_buf cmdq_buf
;
112 struct hinic_sq_ctxt
*sq_ctxt
;
117 err
= hinic_alloc_cmdq_buf(&func_to_io
->cmdqs
, &cmdq_buf
);
119 dev_err(&pdev
->dev
, "Failed to allocate cmdq buf\n");
123 sq_ctxt_block
= cmdq_buf
.buf
;
124 sq_ctxt
= sq_ctxt_block
->sq_ctxt
;
126 hinic_qp_prepare_header(&sq_ctxt_block
->hdr
, HINIC_QP_CTXT_TYPE_SQ
,
127 num_sqs
, func_to_io
->max_qps
);
128 for (i
= 0; i
< num_sqs
; i
++) {
129 qp
= &func_to_io
->qps
[i
];
131 hinic_sq_prepare_ctxt(&sq_ctxt
[i
], &qp
->sq
,
132 base_qpn
+ qp
->q_id
);
135 cmdq_buf
.size
= HINIC_SQ_CTXT_SIZE(num_sqs
);
137 err
= hinic_cmdq_direct_resp(&func_to_io
->cmdqs
, HINIC_MOD_L2NIC
,
138 IO_CMD_MODIFY_QUEUE_CTXT
, &cmdq_buf
,
140 if ((err
) || (out_param
!= 0)) {
141 dev_err(&pdev
->dev
, "Failed to set SQ ctxts\n");
145 hinic_free_cmdq_buf(&func_to_io
->cmdqs
, &cmdq_buf
);
149 static int write_rq_ctxts(struct hinic_func_to_io
*func_to_io
, u16 base_qpn
,
152 struct hinic_hwif
*hwif
= func_to_io
->hwif
;
153 struct hinic_rq_ctxt_block
*rq_ctxt_block
;
154 struct pci_dev
*pdev
= hwif
->pdev
;
155 struct hinic_cmdq_buf cmdq_buf
;
156 struct hinic_rq_ctxt
*rq_ctxt
;
161 err
= hinic_alloc_cmdq_buf(&func_to_io
->cmdqs
, &cmdq_buf
);
163 dev_err(&pdev
->dev
, "Failed to allocate cmdq buf\n");
167 rq_ctxt_block
= cmdq_buf
.buf
;
168 rq_ctxt
= rq_ctxt_block
->rq_ctxt
;
170 hinic_qp_prepare_header(&rq_ctxt_block
->hdr
, HINIC_QP_CTXT_TYPE_RQ
,
171 num_rqs
, func_to_io
->max_qps
);
172 for (i
= 0; i
< num_rqs
; i
++) {
173 qp
= &func_to_io
->qps
[i
];
175 hinic_rq_prepare_ctxt(&rq_ctxt
[i
], &qp
->rq
,
176 base_qpn
+ qp
->q_id
);
179 cmdq_buf
.size
= HINIC_RQ_CTXT_SIZE(num_rqs
);
181 err
= hinic_cmdq_direct_resp(&func_to_io
->cmdqs
, HINIC_MOD_L2NIC
,
182 IO_CMD_MODIFY_QUEUE_CTXT
, &cmdq_buf
,
184 if ((err
) || (out_param
!= 0)) {
185 dev_err(&pdev
->dev
, "Failed to set RQ ctxts\n");
189 hinic_free_cmdq_buf(&func_to_io
->cmdqs
, &cmdq_buf
);
194 * write_qp_ctxts - write the qp ctxt to HW
195 * @func_to_io: func to io channel that holds the IO components
196 * @base_qpn: first qp number
197 * @num_qps: number of qps to write
199 * Return 0 - Success, negative - Failure
201 static int write_qp_ctxts(struct hinic_func_to_io
*func_to_io
, u16 base_qpn
,
204 return (write_sq_ctxts(func_to_io
, base_qpn
, num_qps
) ||
205 write_rq_ctxts(func_to_io
, base_qpn
, num_qps
));
208 static int hinic_clean_queue_offload_ctxt(struct hinic_func_to_io
*func_to_io
,
209 enum hinic_qp_ctxt_type ctxt_type
)
211 struct hinic_hwif
*hwif
= func_to_io
->hwif
;
212 struct hinic_clean_queue_ctxt
*ctxt_block
;
213 struct pci_dev
*pdev
= hwif
->pdev
;
214 struct hinic_cmdq_buf cmdq_buf
;
218 err
= hinic_alloc_cmdq_buf(&func_to_io
->cmdqs
, &cmdq_buf
);
220 dev_err(&pdev
->dev
, "Failed to allocate cmdq buf\n");
224 ctxt_block
= cmdq_buf
.buf
;
225 ctxt_block
->cmdq_hdr
.num_queues
= func_to_io
->max_qps
;
226 ctxt_block
->cmdq_hdr
.queue_type
= ctxt_type
;
227 ctxt_block
->cmdq_hdr
.addr_offset
= 0;
229 /* TSO/LRO ctxt size: 0x0:0B; 0x1:160B; 0x2:200B; 0x3:240B */
230 ctxt_block
->ctxt_size
= 0x3;
232 hinic_cpu_to_be32(ctxt_block
, sizeof(*ctxt_block
));
234 cmdq_buf
.size
= sizeof(*ctxt_block
);
236 err
= hinic_cmdq_direct_resp(&func_to_io
->cmdqs
, HINIC_MOD_L2NIC
,
237 IO_CMD_CLEAN_QUEUE_CTXT
,
238 &cmdq_buf
, &out_param
);
240 if (err
|| out_param
) {
241 dev_err(&pdev
->dev
, "Failed to clean offload ctxts, err: %d, out_param: 0x%llx\n",
247 hinic_free_cmdq_buf(&func_to_io
->cmdqs
, &cmdq_buf
);
252 static int hinic_clean_qp_offload_ctxt(struct hinic_func_to_io
*func_to_io
)
254 /* clean LRO/TSO context space */
255 return (hinic_clean_queue_offload_ctxt(func_to_io
,
256 HINIC_QP_CTXT_TYPE_SQ
) ||
257 hinic_clean_queue_offload_ctxt(func_to_io
,
258 HINIC_QP_CTXT_TYPE_RQ
));
262 * init_qp - Initialize a Queue Pair
263 * @func_to_io: func to io channel that holds the IO components
264 * @qp: pointer to the qp to initialize
265 * @q_id: the id of the qp
266 * @sq_msix_entry: msix entry for sq
267 * @rq_msix_entry: msix entry for rq
269 * Return 0 - Success, negative - Failure
271 static int init_qp(struct hinic_func_to_io
*func_to_io
,
272 struct hinic_qp
*qp
, int q_id
,
273 struct msix_entry
*sq_msix_entry
,
274 struct msix_entry
*rq_msix_entry
)
276 struct hinic_hwif
*hwif
= func_to_io
->hwif
;
277 struct pci_dev
*pdev
= hwif
->pdev
;
278 void __iomem
*db_base
;
283 err
= hinic_wq_allocate(&func_to_io
->wqs
, &func_to_io
->sq_wq
[q_id
],
284 HINIC_SQ_WQEBB_SIZE
, HINIC_SQ_PAGE_SIZE
,
285 func_to_io
->sq_depth
, HINIC_SQ_WQE_MAX_SIZE
);
287 dev_err(&pdev
->dev
, "Failed to allocate WQ for SQ\n");
291 err
= hinic_wq_allocate(&func_to_io
->wqs
, &func_to_io
->rq_wq
[q_id
],
292 HINIC_RQ_WQEBB_SIZE
, HINIC_RQ_PAGE_SIZE
,
293 func_to_io
->rq_depth
, HINIC_RQ_WQE_SIZE
);
295 dev_err(&pdev
->dev
, "Failed to allocate WQ for RQ\n");
299 db_base
= get_db_area(func_to_io
);
300 if (IS_ERR(db_base
)) {
301 dev_err(&pdev
->dev
, "Failed to get DB area for SQ\n");
302 err
= PTR_ERR(db_base
);
306 func_to_io
->sq_db
[q_id
] = db_base
;
309 err
= hinic_init_sq(&qp
->sq
, hwif
, &func_to_io
->sq_wq
[q_id
],
311 CI_ADDR(func_to_io
->ci_addr_base
, q_id
),
312 CI_ADDR(func_to_io
->ci_dma_base
, q_id
), db_base
);
314 dev_err(&pdev
->dev
, "Failed to init SQ\n");
319 err
= hinic_init_rq(&qp
->rq
, hwif
, &func_to_io
->rq_wq
[q_id
],
322 dev_err(&pdev
->dev
, "Failed to init RQ\n");
329 hinic_clean_sq(&qp
->sq
);
332 return_db_area(func_to_io
, db_base
);
335 hinic_wq_free(&func_to_io
->wqs
, &func_to_io
->rq_wq
[q_id
]);
338 hinic_wq_free(&func_to_io
->wqs
, &func_to_io
->sq_wq
[q_id
]);
343 * destroy_qp - Clean the resources of a Queue Pair
344 * @func_to_io: func to io channel that holds the IO components
345 * @qp: pointer to the qp to clean
347 static void destroy_qp(struct hinic_func_to_io
*func_to_io
,
352 hinic_clean_rq(&qp
->rq
);
353 hinic_clean_sq(&qp
->sq
);
355 return_db_area(func_to_io
, func_to_io
->sq_db
[q_id
]);
357 hinic_wq_free(&func_to_io
->wqs
, &func_to_io
->rq_wq
[q_id
]);
358 hinic_wq_free(&func_to_io
->wqs
, &func_to_io
->sq_wq
[q_id
]);
362 * hinic_io_create_qps - Create Queue Pairs
363 * @func_to_io: func to io channel that holds the IO components
364 * @base_qpn: base qp number
365 * @num_qps: number queue pairs to create
366 * @sq_msix_entries: msix entries for sq
367 * @rq_msix_entries: msix entries for rq
369 * Return 0 - Success, negative - Failure
371 int hinic_io_create_qps(struct hinic_func_to_io
*func_to_io
,
372 u16 base_qpn
, int num_qps
,
373 struct msix_entry
*sq_msix_entries
,
374 struct msix_entry
*rq_msix_entries
)
376 struct hinic_hwif
*hwif
= func_to_io
->hwif
;
377 struct pci_dev
*pdev
= hwif
->pdev
;
378 size_t qps_size
, wq_size
, db_size
;
382 qps_size
= num_qps
* sizeof(*func_to_io
->qps
);
383 func_to_io
->qps
= devm_kzalloc(&pdev
->dev
, qps_size
, GFP_KERNEL
);
384 if (!func_to_io
->qps
)
387 wq_size
= num_qps
* sizeof(*func_to_io
->sq_wq
);
388 func_to_io
->sq_wq
= devm_kzalloc(&pdev
->dev
, wq_size
, GFP_KERNEL
);
389 if (!func_to_io
->sq_wq
) {
394 wq_size
= num_qps
* sizeof(*func_to_io
->rq_wq
);
395 func_to_io
->rq_wq
= devm_kzalloc(&pdev
->dev
, wq_size
, GFP_KERNEL
);
396 if (!func_to_io
->rq_wq
) {
401 db_size
= num_qps
* sizeof(*func_to_io
->sq_db
);
402 func_to_io
->sq_db
= devm_kzalloc(&pdev
->dev
, db_size
, GFP_KERNEL
);
403 if (!func_to_io
->sq_db
) {
408 ci_addr_base
= dma_alloc_coherent(&pdev
->dev
, CI_TABLE_SIZE(num_qps
),
409 &func_to_io
->ci_dma_base
,
412 dev_err(&pdev
->dev
, "Failed to allocate CI area\n");
417 func_to_io
->ci_addr_base
= ci_addr_base
;
419 for (i
= 0; i
< num_qps
; i
++) {
420 err
= init_qp(func_to_io
, &func_to_io
->qps
[i
], i
,
421 &sq_msix_entries
[i
], &rq_msix_entries
[i
]);
423 dev_err(&pdev
->dev
, "Failed to create QP %d\n", i
);
428 err
= write_qp_ctxts(func_to_io
, base_qpn
, num_qps
);
430 dev_err(&pdev
->dev
, "Failed to init QP ctxts\n");
431 goto err_write_qp_ctxts
;
434 err
= hinic_clean_qp_offload_ctxt(func_to_io
);
436 dev_err(&pdev
->dev
, "Failed to clean QP contexts space\n");
437 goto err_write_qp_ctxts
;
444 for (j
= 0; j
< i
; j
++)
445 destroy_qp(func_to_io
, &func_to_io
->qps
[j
]);
447 dma_free_coherent(&pdev
->dev
, CI_TABLE_SIZE(num_qps
),
448 func_to_io
->ci_addr_base
, func_to_io
->ci_dma_base
);
451 devm_kfree(&pdev
->dev
, func_to_io
->sq_db
);
454 devm_kfree(&pdev
->dev
, func_to_io
->rq_wq
);
457 devm_kfree(&pdev
->dev
, func_to_io
->sq_wq
);
460 devm_kfree(&pdev
->dev
, func_to_io
->qps
);
465 * hinic_io_destroy_qps - Destroy the IO Queue Pairs
466 * @func_to_io: func to io channel that holds the IO components
467 * @num_qps: number queue pairs to destroy
469 void hinic_io_destroy_qps(struct hinic_func_to_io
*func_to_io
, int num_qps
)
471 struct hinic_hwif
*hwif
= func_to_io
->hwif
;
472 struct pci_dev
*pdev
= hwif
->pdev
;
473 size_t ci_table_size
;
476 ci_table_size
= CI_TABLE_SIZE(num_qps
);
478 for (i
= 0; i
< num_qps
; i
++)
479 destroy_qp(func_to_io
, &func_to_io
->qps
[i
]);
481 dma_free_coherent(&pdev
->dev
, ci_table_size
, func_to_io
->ci_addr_base
,
482 func_to_io
->ci_dma_base
);
484 devm_kfree(&pdev
->dev
, func_to_io
->sq_db
);
486 devm_kfree(&pdev
->dev
, func_to_io
->rq_wq
);
487 devm_kfree(&pdev
->dev
, func_to_io
->sq_wq
);
489 devm_kfree(&pdev
->dev
, func_to_io
->qps
);
492 int hinic_set_wq_page_size(struct hinic_hwdev
*hwdev
, u16 func_idx
,
495 struct hinic_wq_page_size page_size_info
= {0};
496 u16 out_size
= sizeof(page_size_info
);
497 struct hinic_pfhwdev
*pfhwdev
;
500 pfhwdev
= container_of(hwdev
, struct hinic_pfhwdev
, hwdev
);
502 page_size_info
.func_idx
= func_idx
;
503 page_size_info
.ppf_idx
= HINIC_HWIF_PPF_IDX(hwdev
->hwif
);
504 page_size_info
.page_size
= HINIC_PAGE_SIZE_HW(page_size
);
506 err
= hinic_msg_to_mgmt(&pfhwdev
->pf_to_mgmt
, HINIC_MOD_COMM
,
507 HINIC_COMM_CMD_PAGESIZE_SET
, &page_size_info
,
508 sizeof(page_size_info
), &page_size_info
,
509 &out_size
, HINIC_MGMT_MSG_SYNC
);
510 if (err
|| !out_size
|| page_size_info
.status
) {
511 dev_err(&hwdev
->hwif
->pdev
->dev
, "Failed to set wq page size, err: %d, status: 0x%x, out_size: 0x%0x\n",
512 err
, page_size_info
.status
, out_size
);
520 * hinic_io_init - Initialize the IO components
521 * @func_to_io: func to io channel that holds the IO components
522 * @hwif: HW interface for accessing IO
523 * @max_qps: maximum QPs in HW
524 * @num_ceqs: number completion event queues
525 * @ceq_msix_entries: msix entries for ceqs
527 * Return 0 - Success, negative - Failure
529 int hinic_io_init(struct hinic_func_to_io
*func_to_io
,
530 struct hinic_hwif
*hwif
, u16 max_qps
, int num_ceqs
,
531 struct msix_entry
*ceq_msix_entries
)
533 struct pci_dev
*pdev
= hwif
->pdev
;
534 enum hinic_cmdq_type cmdq
, type
;
535 void __iomem
*db_area
;
538 func_to_io
->hwif
= hwif
;
539 func_to_io
->qps
= NULL
;
540 func_to_io
->max_qps
= max_qps
;
541 func_to_io
->ceqs
.hwdev
= func_to_io
->hwdev
;
543 err
= hinic_ceqs_init(&func_to_io
->ceqs
, hwif
, num_ceqs
,
544 HINIC_DEFAULT_CEQ_LEN
, HINIC_EQ_PAGE_SIZE
,
547 dev_err(&pdev
->dev
, "Failed to init CEQs\n");
551 err
= hinic_wqs_alloc(&func_to_io
->wqs
, 2 * max_qps
, hwif
);
553 dev_err(&pdev
->dev
, "Failed to allocate WQS for IO\n");
557 func_to_io
->db_base
= pci_ioremap_bar(pdev
, HINIC_PCI_DB_BAR
);
558 if (!func_to_io
->db_base
) {
559 dev_err(&pdev
->dev
, "Failed to remap IO DB area\n");
564 init_db_area_idx(&func_to_io
->free_db_area
);
566 for (cmdq
= HINIC_CMDQ_SYNC
; cmdq
< HINIC_MAX_CMDQ_TYPES
; cmdq
++) {
567 db_area
= get_db_area(func_to_io
);
568 if (IS_ERR(db_area
)) {
569 dev_err(&pdev
->dev
, "Failed to get cmdq db area\n");
570 err
= PTR_ERR(db_area
);
574 func_to_io
->cmdq_db_area
[cmdq
] = db_area
;
577 err
= hinic_set_wq_page_size(func_to_io
->hwdev
,
578 HINIC_HWIF_FUNC_IDX(hwif
),
579 HINIC_DEFAULT_WQ_PAGE_SIZE
);
581 dev_err(&func_to_io
->hwif
->pdev
->dev
, "Failed to set wq page size\n");
582 goto init_wq_pg_size_err
;
585 err
= hinic_init_cmdqs(&func_to_io
->cmdqs
, hwif
,
586 func_to_io
->cmdq_db_area
);
588 dev_err(&pdev
->dev
, "Failed to initialize cmdqs\n");
595 if (!HINIC_IS_VF(func_to_io
->hwif
))
596 hinic_set_wq_page_size(func_to_io
->hwdev
,
597 HINIC_HWIF_FUNC_IDX(hwif
),
598 HINIC_HW_WQ_PAGE_SIZE
);
601 for (type
= HINIC_CMDQ_SYNC
; type
< cmdq
; type
++)
602 return_db_area(func_to_io
, func_to_io
->cmdq_db_area
[type
]);
604 iounmap(func_to_io
->db_base
);
607 hinic_wqs_free(&func_to_io
->wqs
);
610 hinic_ceqs_free(&func_to_io
->ceqs
);
615 * hinic_io_free - Free the IO components
616 * @func_to_io: func to io channel that holds the IO components
618 void hinic_io_free(struct hinic_func_to_io
*func_to_io
)
620 enum hinic_cmdq_type cmdq
;
622 hinic_free_cmdqs(&func_to_io
->cmdqs
);
624 if (!HINIC_IS_VF(func_to_io
->hwif
))
625 hinic_set_wq_page_size(func_to_io
->hwdev
,
626 HINIC_HWIF_FUNC_IDX(func_to_io
->hwif
),
627 HINIC_HW_WQ_PAGE_SIZE
);
629 for (cmdq
= HINIC_CMDQ_SYNC
; cmdq
< HINIC_MAX_CMDQ_TYPES
; cmdq
++)
630 return_db_area(func_to_io
, func_to_io
->cmdq_db_area
[cmdq
]);
632 iounmap(func_to_io
->db_base
);
633 hinic_wqs_free(&func_to_io
->wqs
);
634 hinic_ceqs_free(&func_to_io
->ceqs
);