// SPDX-License-Identifier: GPL-2.0+
// Copyright (c) 2016-2017 Hisilicon Limited.

#include <linux/device.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "hclgevf_cmd.h"
#include "hclgevf_main.h"
#include "hnae3.h"

#define hclgevf_is_csq(ring) ((ring)->flag & HCLGEVF_TYPE_CSQ)
#define hclgevf_ring_to_dma_dir(ring) (hclgevf_is_csq(ring) ? \
					DMA_TO_DEVICE : DMA_FROM_DEVICE)
#define cmq_ring_to_dev(ring) (&(ring)->dev->pdev->dev)
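
/* The command queue (cmq) is a pair of descriptor rings: the CSQ carries
 * commands from the driver to the device, and the CRQ carries messages
 * from the device back to the driver. Hence the nominal DMA direction
 * above: DMA_TO_DEVICE for the CSQ, DMA_FROM_DEVICE for the CRQ.
 */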

static int hclgevf_ring_space(struct hclgevf_cmq_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;
	int used;

	used = (ntu - ntc + ring->desc_num) % ring->desc_num;

	return ring->desc_num - used - 1;
}
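
/* A worked example for hclgevf_ring_space(): with desc_num = 1024,
 * ntu = 10 and ntc = 5, used = (10 - 5 + 1024) % 1024 = 5, and the
 * function returns 1024 - 5 - 1 = 1018. One slot is always kept free
 * so that a completely full ring can be told apart from an empty one.
 */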

static int hclgevf_cmd_csq_clean(struct hclgevf_hw *hw)
{
	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
	u16 ntc = csq->next_to_clean;
	struct hclgevf_desc *desc;
	int clean = 0;
	u32 head;

	desc = &csq->desc[ntc];
	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);

	/* zero every descriptor the firmware has consumed, i.e. all slots
	 * between next_to_clean and the hardware head pointer
	 */
	while (head != ntc) {
		memset(desc, 0, sizeof(*desc));
		ntc++;
		if (ntc == csq->desc_num)
			ntc = 0;
		desc = &csq->desc[ntc];
		clean++;
	}
	csq->next_to_clean = ntc;

	return clean;
}

static bool hclgevf_cmd_csq_done(struct hclgevf_hw *hw)
{
	u32 head;

	head = hclgevf_read_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG);

	return head == hw->cmq.csq.next_to_use;
}

static bool hclgevf_is_special_opcode(u16 opcode)
{
	u16 spec_opcode[] = {0x30, 0x31, 0x32};
	int i;

	for (i = 0; i < ARRAY_SIZE(spec_opcode); i++) {
		if (spec_opcode[i] == opcode)
			return true;
	}

	return false;
}
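
/* hclgevf_cmd_send() below reads the return value of a special opcode
 * from desc[0] only, rather than from each descriptor in turn; the
 * opcodes listed above appear to be multi-descriptor commands whose
 * completion status is reported solely in the first descriptor.
 */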

static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	int size = ring->desc_num * sizeof(struct hclgevf_desc);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(cmq_ring_to_dev(ring), ring->desc,
					     size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(cmq_ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
{
	dma_unmap_single(cmq_ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 hclgevf_ring_to_dma_dir(ring));

	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}
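
/* Note the asymmetry between the two helpers above: the descriptor array
 * is mapped DMA_BIDIRECTIONAL in hclgevf_alloc_cmd_desc(), but unmapped
 * here with the ring's nominal direction (DMA_TO_DEVICE for the CSQ,
 * DMA_FROM_DEVICE for the CRQ).
 */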

static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
				  struct hclgevf_cmq_ring *ring)
{
	struct hclgevf_hw *hw = &hdev->hw;
	int ring_type = ring->flag;
	u32 reg_val;
	int ret;

	ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
	spin_lock_init(&ring->lock);
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
	ring->dev = hdev;

	/* allocate CSQ/CRQ descriptor */
	ret = hclgevf_alloc_cmd_desc(ring);
	if (ret) {
		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
		return ret;
	}

	/* initialize the hardware registers with csq/crq dma-address,
	 * descriptor number, head & tail pointers
	 */
	switch (ring_type) {
	case HCLGEVF_TYPE_CSQ:
		reg_val = (u32)ring->desc_dma_addr;
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
		/* the double shift avoids shifting a possibly 32-bit wide
		 * dma_addr_t by its full width
		 */
		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);

		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
		break;
	case HCLGEVF_TYPE_CRQ:
		reg_val = (u32)ring->desc_dma_addr;
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);

		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);

		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
		break;
	}

	return 0;
}

void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
				  enum hclgevf_opcode_type opcode, bool is_read)
{
	memset(desc, 0, sizeof(struct hclgevf_desc));
	desc->opcode = cpu_to_le16(opcode);
	desc->flag = cpu_to_le16(HCLGEVF_CMD_FLAG_NO_INTR |
				 HCLGEVF_CMD_FLAG_IN);
	if (is_read)
		desc->flag |= cpu_to_le16(HCLGEVF_CMD_FLAG_WR);
	else
		desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
}

/* hclgevf_cmd_send - send command to command queue
 * @hw: pointer to the hw struct
 * @desc: prefilled descriptor for describing the command
 * @num: the number of descriptors to be sent
 *
 * This is the main send routine for the command queue: it posts the
 * descriptors to the CSQ, optionally waits for completion, and cleans
 * the queue afterwards.
 */
int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
{
	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
	struct hclgevf_desc *desc_to_use;
	bool complete = false;
	u32 timeout = 0;
	int handle = 0;
	int status = 0;
	u16 retval;
	u16 opcode;
	int ntc;

	spin_lock_bh(&hw->cmq.csq.lock);

	if (num > hclgevf_ring_space(&hw->cmq.csq)) {
		spin_unlock_bh(&hw->cmq.csq.lock);
		return -EBUSY;
	}

	/* Record the location of desc in the ring for this time
	 * which will be used for hardware to write back
	 */
	ntc = hw->cmq.csq.next_to_use;
	opcode = le16_to_cpu(desc[0].opcode);
	while (handle < num) {
		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
		*desc_to_use = desc[handle];
		(hw->cmq.csq.next_to_use)++;
		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
			hw->cmq.csq.next_to_use = 0;
		handle++;
	}

	/* Write to hardware */
	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
			  hw->cmq.csq.next_to_use);

	/* If the command is sync, wait for the firmware to write back;
	 * if multiple descriptors are sent, use the first one to check
	 */
	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
		do {
			if (hclgevf_cmd_csq_done(hw))
				break;
			udelay(1);
			timeout++;
		} while (timeout < hw->cmq.tx_timeout);
	}

	if (hclgevf_cmd_csq_done(hw)) {
		complete = true;
		handle = 0;

		while (handle < num) {
			/* Get the result of hardware write back */
			desc_to_use = &hw->cmq.csq.desc[ntc];
			desc[handle] = *desc_to_use;

			if (likely(!hclgevf_is_special_opcode(opcode)))
				retval = le16_to_cpu(desc[handle].retval);
			else
				retval = le16_to_cpu(desc[0].retval);

			if ((enum hclgevf_cmd_return_status)retval ==
			    HCLGEVF_CMD_EXEC_SUCCESS)
				status = 0;
			else
				status = -EIO;
			hw->cmq.last_status = (enum hclgevf_cmd_status)retval;

			ntc++;
			handle++;
			if (ntc == hw->cmq.csq.desc_num)
				ntc = 0;
		}
	}

	if (!complete)
		status = -EAGAIN;

	/* Clean the command send queue */
	handle = hclgevf_cmd_csq_clean(hw);
	if (handle != num)
		dev_warn(&hdev->pdev->dev,
			 "cleaned %d, need to clean %d\n", handle, num);

	spin_unlock_bh(&hw->cmq.csq.lock);

	return status;
}
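
/* A minimal synchronous-command sketch, modeled on
 * hclgevf_cmd_query_firmware_version() below; the opcode and the layout
 * of the write-back data depend on the command being issued:
 *
 *	struct hclgevf_desc desc;
 *	int ret;
 *
 *	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, true);
 *	ret = hclgevf_cmd_send(hw, &desc, 1);
 *	if (!ret)
 *		// parse the firmware's write-back from desc.data
 */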

static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
					      u32 *version)
{
	struct hclgevf_query_version_cmd *resp;
	struct hclgevf_desc desc;
	int status;

	resp = (struct hclgevf_query_version_cmd *)desc.data;

	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_QUERY_FW_VER, 1);
	status = hclgevf_cmd_send(hw, &desc, 1);
	if (!status)
		*version = le32_to_cpu(resp->firmware);

	return status;
}

int hclgevf_cmd_init(struct hclgevf_dev *hdev)
{
	u32 version;
	int ret;

	/* setup Tx write back timeout */
	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;

	/* setup queue CSQ/CRQ rings */
	hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize CSQ ring\n", ret);
		return ret;
	}

	hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to initialize CRQ ring\n", ret);
		goto err_csq;
	}

	/* get firmware version */
	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
	if (ret) {
		dev_err(&hdev->pdev->dev,
			"failed(%d) to query firmware version\n", ret);
		goto err_crq;
	}
	hdev->fw_version = version;

	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);

	return 0;

err_crq:
	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
err_csq:
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);

	return ret;
}

void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
{
	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
}