// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hnae3.h"

#define cmq_ring_to_dev(ring)	(&(ring)->dev->pdev->dev)

static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used;

	used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}

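/* Check that the head value reported by hardware falls within the range of
 * descriptors currently handed to hardware, taking ring wrap-around into
 * account.
 */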
static int hclgevf_is_valid_csq_clean_head(struct hclgevf_cmq_ring *ring,
					   int head)
{
	int ntu = ring->next_to_use;
	int ntc = ring->next_to_clean;

	if (ntu > ntc)
		return head >= ntc && head <= ntu;

	return head >= ntc || head <= ntu;
}

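/* Reclaim the CSQ descriptors that firmware has consumed and return how many
 * were cleaned; if the reported head is invalid, disable the command queue.
 */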
static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
	struct hclgevf_dev *hdev = container_of(hw, struct hclgevf_dev, hw);
	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
	int clean;
	u32 head;

	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (!hclgevf_is_valid_csq_clean_head(csq, head)) {
		dev_warn(&hdev->pdev->dev, "wrong cmd head (%u, %d-%d)\n", head,
			 csq->next_to_use, csq->next_to_clean);
		dev_warn(&hdev->pdev->dev,
			 "Disabling any further commands to IMP firmware\n");
		set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
		return -EIO;
	}

	clean = (head - csq->next_to_clean + csq->desc_num) % csq->desc_num;
	csq->next_to_clean = head;

	return clean;
}

static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
{
	u32 head;

	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

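/* Special opcodes report their result only in the first descriptor of a
 * multi-descriptor command, so callers read retval from desc[0] for them.
 */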
static bool hclgevf_is_special_opcode(u16 opcode)
{
	static const u16 spec_opcode[] = {0x30, 0x31, 0x32};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}

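/* Program the descriptor base address, ring depth and head/tail pointers of
 * a CSQ or CRQ ring into the corresponding hardware registers.
 */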
static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
{
	struct hclgevf_dev *hdev = ring->dev;
	struct hclgevf_hw *hw = &hdev->hw;
	u32 reg_val;

	if (ring->flag == HCLGEVF_TYPE_CSQ) {
		reg_val = lower_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
		reg_val = upper_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);

		reg_val = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG);
		reg_val &= HCLGEVF_NIC_SW_RST_RDY;
		reg_val |= (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
	} else {
		reg_val = lower_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
		reg_val = upper_32_bits(ring->desc_dma_addr);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);

		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
	}
}

static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
{
	hclgevf_cmd_config_regs(&hw->cmq.csq);
	hclgevf_cmd_config_regs(&hw->cmq.crq);
}

static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclgevf_desc);

	ring->desc = dma_alloc_coherent(cmq_ring_to_dev(ring), size,
					&ring->desc_dma_addr, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	return 0;
}

static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclgevf_desc);

	if (ring->desc) {
		dma_free_coherent(cmq_ring_to_dev(ring), size,
				  ring->desc, ring->desc_dma_addr);
		ring->desc = NULL;
	}
}

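/* Pick the CSQ or CRQ ring according to @ring_type and allocate its
 * DMA-coherent descriptor memory.
 */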
static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
{
	struct hclgevf_hw *hw = &hdev->hw;
	struct hclgevf_cmq_ring *ring =
		(ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
	int ret;

	ring->dev = hdev;
	ring->flag = ring_type;

	/* allocate CSQ/CRQ descriptor */
	ret = hclgevf_alloc_cmd_desc(ring);
	if (ret)
		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");

	return ret;
}

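/* Zero a command descriptor and initialise its opcode and flag fields,
 * setting the WR flag for read requests and clearing it for writes.
 */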
void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
				  enum hclgevf_opcode_type opcode, bool is_read)
{
	memset(desc, 0, sizeof(struct hclgevf_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
				 HCLGEVF_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}

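/* Map a command completion code returned by firmware to a standard errno
 * value.
 */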
static int hclgevf_cmd_convert_err_code(u16 desc_ret)
{
	switch (desc_ret) {
	case HCLGEVF_CMD_EXEC_SUCCESS:
		return 0;
	case HCLGEVF_CMD_NO_AUTH:
		return -EPERM;
	case HCLGEVF_CMD_NOT_SUPPORTED:
		return -EOPNOTSUPP;
	case HCLGEVF_CMD_QUEUE_FULL:
		return -EXFULL;
	case HCLGEVF_CMD_NEXT_ERR:
		return -ENOSR;
	case HCLGEVF_CMD_UNEXE_ERR:
		return -ENOTBLK;
	case HCLGEVF_CMD_PARA_ERR:
		return -EINVAL;
	case HCLGEVF_CMD_RESULT_ERR:
		return -ERANGE;
	case HCLGEVF_CMD_TIMEOUT:
		return -ETIME;
	case HCLGEVF_CMD_HILINK_ERR:
		return -ENOLINK;
	case HCLGEVF_CMD_QUEUE_ILLEGAL:
		return -ENXIO;
	case HCLGEVF_CMD_INVALID:
		return -EBADR;
	default:
		return -EIO;
	}
}

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor(s) describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main command-queue send routine: it posts the descriptors to
 * the CSQ, waits for completion when the command is synchronous, and then
 * cleans the queue.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
	struct hclgevf_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int status = 0;
	u16 retval;
	u16 opcode;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (test_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
		/* If the CMDQ ring is full, SW HEAD and HW HEAD may differ,
		 * so update the SW HEAD pointer csq->next_to_clean.
		 */
		csq->next_to_clean = hclgevf_read_dev(hw,
						      HCLGEVF_NIC_CSQ_HEAD_REG);
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time,
	 * which will be used for the hardware write-back.
	 */
	ntc = hw->cmq.csq.next_to_use;
	opcode = le16_to_cpu(desc[0].opcode);
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
			  hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check.
	 */
	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclgevf_cmd_csq_done(hw))
				break;
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (hclgevf_cmd_csq_done(hw)) {
		complete = true;
		handle = 0;

		while (handle < num) {
			/* Get the result of hardware write back */
			desc_to_use = &hw->cmq.csq.desc[ntc];
			desc[handle] = *desc_to_use;

			if (likely(!hclgevf_is_special_opcode(opcode)))
				retval = le16_to_cpu(desc[handle].retval);
			else
				retval = le16_to_cpu(desc[0].retval);

			status = hclgevf_cmd_convert_err_code(retval);
			hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
			ntc++;
			handle++;
			if (ntc == hw->cmq.csq.desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		status = -EBADE;

	/* Clean the command send queue */
	handle = hclgevf_cmd_csq_clean(hw);
	if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return status;
}

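/* Capabilities assumed for any device of version HNAE3_DEVICE_VERSION_V2 or
 * later, set before parsing the capability bits reported by firmware.
 */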
static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);

	set_bit(HNAE3_DEV_SUPPORT_FD_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_GRO_B, ae_dev->caps);
	set_bit(HNAE3_DEV_SUPPORT_FEC_B, ae_dev->caps);
}

static void hclgevf_parse_capability(struct hclgevf_dev *hdev,
				     struct hclgevf_query_version_cmd *cmd)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	u32 caps;

	caps = __le32_to_cpu(cmd->caps[0]);

	if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_GSO_B))
		set_bit(HNAE3_DEV_SUPPORT_UDP_GSO_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGEVF_CAP_INT_QL_B))
		set_bit(HNAE3_DEV_SUPPORT_INT_QL_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGEVF_CAP_TQP_TXRX_INDEP_B))
		set_bit(HNAE3_DEV_SUPPORT_TQP_TXRX_INDEP_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGEVF_CAP_HW_TX_CSUM_B))
		set_bit(HNAE3_DEV_SUPPORT_HW_TX_CSUM_B, ae_dev->caps);
	if (hnae3_get_bit(caps, HCLGEVF_CAP_UDP_TUNNEL_CSUM_B))
		set_bit(HNAE3_DEV_SUPPORT_UDP_TUNNEL_CSUM_B, ae_dev->caps);
}

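/* Query the firmware and hardware versions over the command queue and record
 * the reported device capabilities in the ae_dev.
 */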
static int hclgevf_cmd_query_version_and_capability(struct hclgevf_dev *hdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
	struct hclgevf_query_version_cmd *resp;
	struct hclgevf_desc desc;
	int status;

	resp = (struct hclgevf_query_version_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
	status = hclgevf_cmd_send(&hdev->hw, &desc, 1);
	if (status)
		return status;

	hdev->fw_version = le32_to_cpu(resp->firmware);

	ae_dev->dev_version = le32_to_cpu(resp->hardware) <<
			      HNAE3_PCI_REVISION_BIT_SIZE;
	ae_dev->dev_version |= hdev->pdev->revision;

	if (ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2)
		hclgevf_set_default_capability(hdev);

	hclgevf_parse_capability(hdev, resp);

	return status;
}

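/* One-time setup of the command queue: initialise the CSQ/CRQ locks, set the
 * ring sizes and timeout, and allocate the descriptor memory for both rings.
 */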
int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
{
	int ret;

	/* Setup the lock for command queue */
	spin_lock_init(&hdev->hw.cmq.csq.lock);
	spin_lock_init(&hdev->hw.cmq.crq.lock);

	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
	hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
	hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;

	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CSQ ring setup error %d\n", ret);
		return ret;
	}

	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"CRQ ring setup error %d\n", ret);
		goto err_csq;
	}

	return 0;
err_csq:
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
	return ret;
}

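/* (Re)initialise the command queue after probe or reset: reset the software
 * ring pointers, program the hardware registers, and query the firmware
 * version and device capabilities.
 */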
int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
	int ret;

	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);

	/* initialize the pointers of async rx queue of mailbox */
	hdev->arq.hdev = hdev;
	hdev->arq.head = 0;
	hdev->arq.tail = 0;
	atomic_set(&hdev->arq.count, 0);
	hdev->hw.cmq.csq.next_to_clean = 0;
	hdev->hw.cmq.csq.next_to_use = 0;
	hdev->hw.cmq.crq.next_to_clean = 0;
	hdev->hw.cmq.crq.next_to_use = 0;

	hclgevf_cmd_init_regs(&hdev->hw);

	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);

	clear_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	/* Check if there is a new reset pending, because a higher level
	 * reset may happen while a lower level reset is being processed.
	 */
	if (hclgevf_is_reset_pending(hdev)) {
		ret = -EBUSY;
		goto err_cmd_init;
	}

	/* get version and device capabilities */
	ret = hclgevf_cmd_query_version_and_capability(hdev);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed to query version and capabilities, ret = %d\n", ret);
		goto err_cmd_init;
	}

	dev_info(&hdev->pdev->dev, "The firmware version is %lu.%lu.%lu.%lu\n",
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE3_MASK,
				 HNAE3_FW_VERSION_BYTE3_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE2_MASK,
				 HNAE3_FW_VERSION_BYTE2_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE1_MASK,
				 HNAE3_FW_VERSION_BYTE1_SHIFT),
		 hnae3_get_field(hdev->fw_version, HNAE3_FW_VERSION_BYTE0_MASK,
				 HNAE3_FW_VERSION_BYTE0_SHIFT));

	return 0;

err_cmd_init:
	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);

	return ret;
}

static void hclgevf_cmd_uninit_regs(struct hclgevf_hw *hw)
{
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
	hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
}

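/* Tear down the command queue: block further commands, clear the hardware
 * registers and free the CSQ and CRQ descriptor memory.
 */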
void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
	spin_lock_bh(&hdev->hw.cmq.csq.lock);
	spin_lock(&hdev->hw.cmq.crq.lock);
	set_bit(HCLGEVF_STATE_CMD_DISABLE, &hdev->state);
	hclgevf_cmd_uninit_regs(&hdev->hw);
	spin_unlock(&hdev->hw.cmq.crq.lock);
	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}