drivers/infiniband/hw/bnxt_re/qplib_rcfw.h
/*
 * Broadcom NetXtreme-E RoCE driver.
 *
 * Copyright (c) 2016 - 2017, Broadcom. All rights reserved.  The term
 * Broadcom refers to Broadcom Limited and/or its subsidiaries.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS''
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
 * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
 * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Description: RDMA Controller HW interface (header)
 */
#ifndef __BNXT_QPLIB_RCFW_H__
#define __BNXT_QPLIB_RCFW_H__
#define RCFW_CMDQ_TRIG_VAL		1
#define RCFW_COMM_PCI_BAR_REGION	0
#define RCFW_COMM_CONS_PCI_BAR_REGION	2
#define RCFW_COMM_BASE_OFFSET		0x600
#define RCFW_PF_COMM_PROD_OFFSET	0xc
#define RCFW_VF_COMM_PROD_OFFSET	0xc
#define RCFW_COMM_TRIG_OFFSET		0x100
#define RCFW_COMM_SIZE			0x104

#define RCFW_DBR_PCI_BAR_REGION		2
#define RCFW_DBR_BASE_PAGE_SHIFT	12

#define RCFW_CMD_PREP(req, CMD, cmd_flags)				\
	do {								\
		memset(&(req), 0, sizeof((req)));			\
		(req).opcode = CMDQ_BASE_OPCODE_##CMD;			\
		(req).cmd_size = sizeof((req));				\
		(req).flags = cpu_to_le16(cmd_flags);			\
	} while (0)
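
/*
 * Illustrative use of RCFW_CMD_PREP (sketch only; the request struct and the
 * opcode suffix come from roce_hsi.h, e.g. CMDQ_BASE_OPCODE_CREATE_CQ):
 *
 *	struct cmdq_create_cq req;
 *
 *	RCFW_CMD_PREP(req, CREATE_CQ, cmd_flags);
 *
 * The macro zeroes the request, stamps the opcode and size, and stores the
 * flags in little-endian byte order.
 */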
#define RCFW_CMD_WAIT_TIME_MS		20000 /* 20 Seconds timeout */

/* Cmdq contains a fixed number of 16-byte slots */
struct bnxt_qplib_cmdqe {
	u8		data[16];
};

/* CMDQ elements */
#define BNXT_QPLIB_CMDQE_MAX_CNT_256	256
#define BNXT_QPLIB_CMDQE_MAX_CNT_8192	8192
#define BNXT_QPLIB_CMDQE_UNITS		sizeof(struct bnxt_qplib_cmdqe)
#define BNXT_QPLIB_CMDQE_BYTES(depth)	((depth) * BNXT_QPLIB_CMDQE_UNITS)
static inline u32 bnxt_qplib_cmdqe_npages(u32 depth)
{
	u32 npages;

	npages = BNXT_QPLIB_CMDQE_BYTES(depth) / PAGE_SIZE;
	if (BNXT_QPLIB_CMDQE_BYTES(depth) % PAGE_SIZE)
		npages++;
	return npages;
}

static inline u32 bnxt_qplib_cmdqe_page_size(u32 depth)
{
	return (bnxt_qplib_cmdqe_npages(depth) * PAGE_SIZE);
}

static inline u32 bnxt_qplib_cmdqe_cnt_per_pg(u32 depth)
{
	return (bnxt_qplib_cmdqe_page_size(depth) /
		 BNXT_QPLIB_CMDQE_UNITS);
}
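
/*
 * Worked example (assuming a 4 KB PAGE_SIZE): a depth of 256 slots needs
 * 256 * 16 = 4096 bytes, i.e. one page, while a depth of 8192 slots needs
 * 131072 bytes, i.e. 32 pages. bnxt_qplib_cmdqe_page_size() therefore
 * rounds the queue size up to a whole number of pages.
 */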
/* Convert cmd_size from bytes to a count of CMDQE units, rounding up */
static inline void bnxt_qplib_set_cmd_slots(struct cmdq_base *req)
{
	req->cmd_size = (req->cmd_size + BNXT_QPLIB_CMDQE_UNITS - 1) /
			 BNXT_QPLIB_CMDQE_UNITS;
}
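
/* For example, a 24-byte request becomes (24 + 15) / 16 = 2 slots. */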
#define MAX_CMDQ_IDX(depth)		((depth) - 1)

static inline u32 bnxt_qplib_max_cmdq_idx_per_pg(u32 depth)
{
	return (bnxt_qplib_cmdqe_cnt_per_pg(depth) - 1);
}
#define RCFW_MAX_COOKIE_VALUE		0x7FFF
#define RCFW_CMD_IS_BLOCKING		0x8000
#define RCFW_BLOCKED_CMD_WAIT_COUNT	0x4E20

#define HWRM_VERSION_RCFW_CMDQ_DEPTH_CHECK 0x1000900020011ULL

static inline u32 get_cmdq_pg(u32 val, u32 depth)
{
	return (val & ~(bnxt_qplib_max_cmdq_idx_per_pg(depth))) /
		(bnxt_qplib_cmdqe_cnt_per_pg(depth));
}

static inline u32 get_cmdq_idx(u32 val, u32 depth)
{
	return val & (bnxt_qplib_max_cmdq_idx_per_pg(depth));
}
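
/*
 * Example (hypothetical numbers): with 256 CMDQE units per page, a producer
 * value of 300 maps to page 300 / 256 = 1 and slot 300 & 255 = 44 within
 * that page.
 */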
/* Crsq buf is 1024-Byte */
struct bnxt_qplib_crsbe {
	u8			data[1024];
};

/* CREQ */
/* Allocate 1 per QP for async error notification for now */
#define BNXT_QPLIB_CREQE_MAX_CNT	(64 * 1024)
#define BNXT_QPLIB_CREQE_UNITS		16	/* 16-Bytes per prod unit */
#define BNXT_QPLIB_CREQE_CNT_PER_PG	(PAGE_SIZE / BNXT_QPLIB_CREQE_UNITS)

#define MAX_CREQ_IDX			(BNXT_QPLIB_CREQE_MAX_CNT - 1)
#define MAX_CREQ_IDX_PER_PG		(BNXT_QPLIB_CREQE_CNT_PER_PG - 1)

static inline u32 get_creq_pg(u32 val)
{
	return (val & ~MAX_CREQ_IDX_PER_PG) / BNXT_QPLIB_CREQE_CNT_PER_PG;
}

static inline u32 get_creq_idx(u32 val)
{
	return val & MAX_CREQ_IDX_PER_PG;
}

#define BNXT_QPLIB_CREQE_PER_PG	(PAGE_SIZE / sizeof(struct creq_base))

#define CREQ_CMP_VALID(hdr, raw_cons, cp_bit)			\
	(!!((hdr)->v & CREQ_BASE_V) ==				\
	   !((raw_cons) & (cp_bit)))
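
/*
 * CREQ_CMP_VALID implements the usual ring valid-bit scheme: the expected
 * polarity of the V bit flips each time the raw consumer index wraps past
 * cp_bit, so entries left over from the previous pass around the ring are
 * rejected as not yet valid.
 */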
#define CREQ_DB_KEY_CP			(0x2 << CMPL_DOORBELL_KEY_SFT)
#define CREQ_DB_IDX_VALID		CMPL_DOORBELL_IDX_VALID
#define CREQ_DB_IRQ_DIS			CMPL_DOORBELL_MASK
#define CREQ_DB_CP_FLAGS_REARM		(CREQ_DB_KEY_CP |	\
					 CREQ_DB_IDX_VALID)
#define CREQ_DB_CP_FLAGS		(CREQ_DB_KEY_CP |	\
					 CREQ_DB_IDX_VALID |	\
					 CREQ_DB_IRQ_DIS)
static inline void bnxt_qplib_ring_creq_db64(void __iomem *db, u32 index,
					     u32 xid, bool arm)
{
	u64 val = 0;

	val = xid & DBC_DBC_XID_MASK;
	val |= DBC_DBC_PATH_ROCE;
	val |= arm ? DBC_DBC_TYPE_NQ_ARM : DBC_DBC_TYPE_NQ;
	val <<= 32;
	val |= index & DBC_DBC_INDEX_MASK;

	writeq(val, db);
}

static inline void bnxt_qplib_ring_creq_db_rearm(void __iomem *db, u32 raw_cons,
						 u32 max_elements, u32 xid,
						 bool gen_p5)
{
	u32 index = raw_cons & (max_elements - 1);

	if (gen_p5)
		bnxt_qplib_ring_creq_db64(db, index, xid, true);
	else
		writel(CREQ_DB_CP_FLAGS_REARM | (index & DBC_DBC32_XID_MASK),
		       db);
}

static inline void bnxt_qplib_ring_creq_db(void __iomem *db, u32 raw_cons,
					   u32 max_elements, u32 xid,
					   bool gen_p5)
{
	u32 index = raw_cons & (max_elements - 1);

	if (gen_p5)
		bnxt_qplib_ring_creq_db64(db, index, xid, true);
	else
		writel(CREQ_DB_CP_FLAGS | (index & DBC_DBC32_XID_MASK),
		       db);
}
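
/*
 * On Gen P5 devices the CREQ doorbell is a single 64-bit write: the ring id
 * (xid) and the ARM/non-ARM doorbell type sit in the upper 32 bits and the
 * current consumer index in the lower 32 bits. Older devices use a 32-bit
 * completion doorbell composed from the CREQ_DB_CP_FLAGS* masks above.
 */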
#define CREQ_ENTRY_POLL_BUDGET		0x100

/* HWQ */

struct bnxt_qplib_crsq {
	struct creq_qp_event	*resp;
	u32			req_size;
};

struct bnxt_qplib_rcfw_sbuf {
	void *sb;
	dma_addr_t dma_addr;
	u32 size;
};

struct bnxt_qplib_qp_node {
	u32 qp_id;		/* QP id */
	void *qp_handle;	/* ptr to qplib_qp */
};

#define BNXT_QPLIB_OOS_COUNT_MASK 0xFFFFFFFF
/* RCFW Communication Channels */
struct bnxt_qplib_rcfw {
	struct pci_dev		*pdev;
	struct bnxt_qplib_res	*res;
	int			vector;
	struct tasklet_struct	worker;
	bool			requested;
	unsigned long		*cmdq_bitmap;
	u32			bmap_size;
	unsigned long		flags;
#define FIRMWARE_INITIALIZED_FLAG	0
#define FIRMWARE_FIRST_FLAG		31
#define FIRMWARE_TIMED_OUT		3
	wait_queue_head_t	waitq;
	int			(*aeq_handler)(struct bnxt_qplib_rcfw *,
					       void *, void *);
	u32			seq_num;

	/* Bar region info */
	void __iomem		*cmdq_bar_reg_iomem;
	u16			cmdq_bar_reg;
	u16			cmdq_bar_reg_prod_off;
	u16			cmdq_bar_reg_trig_off;
	u16			creq_ring_id;
	u16			creq_bar_reg;
	void __iomem		*creq_bar_reg_iomem;

	/* Cmd-Resp and Async Event notification queue */
	struct bnxt_qplib_hwq	creq;
	u64			creq_qp_event_processed;
	u64			creq_func_event_processed;

	/* Actual Cmd and Resp Queues */
	struct bnxt_qplib_hwq	cmdq;
	struct bnxt_qplib_crsq	*crsqe_tbl;
	int			qp_tbl_size;
	struct bnxt_qplib_qp_node *qp_tbl;
	u64			oos_prev;
	u32			init_oos_stats;
	u32			cmdq_depth;
};
void bnxt_qplib_free_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_alloc_rcfw_channel(struct pci_dev *pdev,
				  struct bnxt_qplib_rcfw *rcfw,
				  struct bnxt_qplib_ctx *ctx,
				  int qp_tbl_sz);
void bnxt_qplib_rcfw_stop_irq(struct bnxt_qplib_rcfw *rcfw, bool kill);
void bnxt_qplib_disable_rcfw_channel(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_rcfw_start_irq(struct bnxt_qplib_rcfw *rcfw, int msix_vector,
			      bool need_init);
int bnxt_qplib_enable_rcfw_channel(struct pci_dev *pdev,
				   struct bnxt_qplib_rcfw *rcfw,
				   int msix_vector,
				   int cp_bar_reg_off, int virt_fn,
				   int (*aeq_handler)(struct bnxt_qplib_rcfw *,
						      void *aeqe, void *obj));

struct bnxt_qplib_rcfw_sbuf *bnxt_qplib_rcfw_alloc_sbuf(
				struct bnxt_qplib_rcfw *rcfw,
				u32 size);
void bnxt_qplib_rcfw_free_sbuf(struct bnxt_qplib_rcfw *rcfw,
			       struct bnxt_qplib_rcfw_sbuf *sbuf);
int bnxt_qplib_rcfw_send_message(struct bnxt_qplib_rcfw *rcfw,
				 struct cmdq_base *req, struct creq_base *resp,
				 void *sbuf, u8 is_block);
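
/*
 * Typical caller flow (illustrative sketch only; the QUERY_FUNC command and
 * response structs come from roce_hsi.h):
 *
 *	struct creq_query_func_resp resp;
 *	struct cmdq_query_func req;
 *	int rc;
 *
 *	RCFW_CMD_PREP(req, QUERY_FUNC, 0);
 *	rc = bnxt_qplib_rcfw_send_message(rcfw, (struct cmdq_base *)&req,
 *					  (struct creq_base *)&resp,
 *					  NULL, 0);
 *
 * Commands that return side-band data additionally pass a buffer obtained
 * from bnxt_qplib_rcfw_alloc_sbuf() and release it with
 * bnxt_qplib_rcfw_free_sbuf() once the response has been consumed.
 */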
int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
			 struct bnxt_qplib_ctx *ctx, int is_virtfn);
void bnxt_qplib_mark_qp_error(void *qp_handle);
#endif /* __BNXT_QPLIB_RCFW_H__ */