drm/rockchip: Don't change hdmi reference clock rate
[drm/drm-misc.git] / drivers / net / ethernet / qlogic / qed / qed_cxt.h
blob 168ce2c503859eed9457fe99d410dccbe5f15f24
1 /* SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause) */
2 /* QLogic qed NIC Driver
3 * Copyright (c) 2015-2017 QLogic Corporation
4 * Copyright (c) 2019-2020 Marvell International Ltd.
5 */
7 #ifndef _QED_CXT_H
8 #define _QED_CXT_H
10 #include <linux/types.h>
11 #include <linux/slab.h>
12 #include <linux/qed/qed_if.h>
13 #include "qed_hsi.h"
14 #include "qed.h"
16 struct qed_cxt_info {
17 void *p_cxt;
18 u32 iid;
19 enum protocol_type type;
22 #define MAX_TID_BLOCKS 512
23 struct qed_tid_mem {
24 u32 tid_size;
25 u32 num_tids_per_block;
26 u32 waste;
27 u8 *blocks[MAX_TID_BLOCKS]; /* 4K */
/**
 * qed_cxt_get_cid_info(): Returns the context info for a specific cid.
 *
 * @p_hwfn: HW device data.
 * @p_info: In/out - caller fills iid/type, qed fills p_cxt.
 *
 * Return: Int.
 */
int qed_cxt_get_cid_info(struct qed_hwfn *p_hwfn,
			 struct qed_cxt_info *p_info);
/**
 * qed_cxt_get_tid_mem_info(): Returns the tid mem info.
 *
 * @p_hwfn: HW device data.
 * @p_info: in/out.
 *
 * Return: int.
 */
int qed_cxt_get_tid_mem_info(struct qed_hwfn *p_hwfn,
			     struct qed_tid_mem *p_info);
52 #define QED_CXT_TCP_ULP_TID_SEG PROTOCOLID_TCP_ULP
53 #define QED_CXT_ROCE_TID_SEG PROTOCOLID_ROCE
54 #define QED_CXT_FCOE_TID_SEG PROTOCOLID_FCOE
55 enum qed_cxt_elem_type {
56 QED_ELEM_CXT,
57 QED_ELEM_SRQ,
58 QED_ELEM_TASK,
59 QED_ELEM_XRC_SRQ,
62 u32 qed_cxt_get_proto_cid_count(struct qed_hwfn *p_hwfn,
63 enum protocol_type type, u32 *vf_cid);
65 /**
66 * qed_cxt_set_pf_params(): Set the PF params for cxt init.
68 * @p_hwfn: HW device data.
69 * @rdma_tasks: Requested maximum.
71 * Return: int.
73 int qed_cxt_set_pf_params(struct qed_hwfn *p_hwfn, u32 rdma_tasks);
75 /**
76 * qed_cxt_cfg_ilt_compute(): Compute ILT init parameters.
78 * @p_hwfn: HW device data.
79 * @last_line: Last_line.
81 * Return: Int
83 int qed_cxt_cfg_ilt_compute(struct qed_hwfn *p_hwfn, u32 *last_line);
85 /**
86 * qed_cxt_cfg_ilt_compute_excess(): How many lines can be decreased.
88 * @p_hwfn: HW device data.
89 * @used_lines: Used lines.
91 * Return: Int.
93 u32 qed_cxt_cfg_ilt_compute_excess(struct qed_hwfn *p_hwfn, u32 used_lines);
/**
 * qed_cxt_mngr_alloc(): Allocate and init the context manager struct.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_cxt_mngr_alloc(struct qed_hwfn *p_hwfn);
/**
 * qed_cxt_mngr_free() - Context manager free.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_cxt_mngr_free(struct qed_hwfn *p_hwfn);
/**
 * qed_cxt_tables_alloc(): Allocate ILT shadow, Searcher T2, acquired map.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Int.
 */
int qed_cxt_tables_alloc(struct qed_hwfn *p_hwfn);
/**
 * qed_cxt_mngr_setup(): Reset the acquired CIDs.
 *
 * @p_hwfn: HW device data.
 */
void qed_cxt_mngr_setup(struct qed_hwfn *p_hwfn);
/**
 * qed_cxt_hw_init_common(): Initialize ILT and DQ, common phase, per path.
 *
 * @p_hwfn: HW device data.
 *
 * Return: Void.
 */
void qed_cxt_hw_init_common(struct qed_hwfn *p_hwfn);
/**
 * qed_cxt_hw_init_pf(): Initialize ILT and DQ, PF phase, per path.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Void.
 */
void qed_cxt_hw_init_pf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
149 * qed_qm_init_pf(): Initailze the QM PF phase, per path.
151 * @p_hwfn: HW device data.
152 * @p_ptt: P_ptt.
153 * @is_pf_loading: Is pf pending.
155 * Return: Void.
157 void qed_qm_init_pf(struct qed_hwfn *p_hwfn,
158 struct qed_ptt *p_ptt, bool is_pf_loading);
/**
 * qed_qm_reconf(): Reconfigures QM pf on the fly.
 *
 * @p_hwfn: HW device data.
 * @p_ptt: P_ptt.
 *
 * Return: Int.
 */
int qed_qm_reconf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
170 #define QED_CXT_PF_CID (0xff)
173 * qed_cxt_release_cid(): Release a cid.
175 * @p_hwfn: HW device data.
176 * @cid: Cid.
178 * Return: Void.
180 void qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid);
183 * _qed_cxt_release_cid(): Release a cid belonging to a vf-queue.
185 * @p_hwfn: HW device data.
186 * @cid: Cid.
187 * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
189 * Return: Void.
191 void _qed_cxt_release_cid(struct qed_hwfn *p_hwfn, u32 cid, u8 vfid);
194 * qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type.
196 * @p_hwfn: HW device data.
197 * @type: Type.
198 * @p_cid: Pointer cid.
200 * Return: Int.
202 int qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
203 enum protocol_type type, u32 *p_cid);
206 * _qed_cxt_acquire_cid(): Acquire a new cid of a specific protocol type
207 * for a vf-queue.
209 * @p_hwfn: HW device data.
210 * @type: Type.
211 * @p_cid: Pointer cid.
212 * @vfid: Engine relative index. QED_CXT_PF_CID if belongs to PF.
214 * Return: Int.
216 int _qed_cxt_acquire_cid(struct qed_hwfn *p_hwfn,
217 enum protocol_type type, u32 *p_cid, u8 vfid);
219 int qed_cxt_dynamic_ilt_alloc(struct qed_hwfn *p_hwfn,
220 enum qed_cxt_elem_type elem_type, u32 iid);
221 u32 qed_cxt_get_proto_tid_count(struct qed_hwfn *p_hwfn,
222 enum protocol_type type);
223 u32 qed_cxt_get_proto_cid_start(struct qed_hwfn *p_hwfn,
224 enum protocol_type type);
225 int qed_cxt_free_proto_ilt(struct qed_hwfn *p_hwfn, enum protocol_type proto);
227 #define QED_CTX_WORKING_MEM 0
228 #define QED_CTX_FL_MEM 1
229 int qed_cxt_get_task_ctx(struct qed_hwfn *p_hwfn,
230 u32 tid, u8 ctx_type, void **task_ctx);
/* Max number of connection types in HW (DQ/CDU etc.) */
#define MAX_CONN_TYPES		PROTOCOLID_COMMON
#define NUM_TASK_TYPES		2
#define NUM_TASK_PF_SEGMENTS	4
#define NUM_TASK_VF_SEGMENTS	1

/* PF per protocol configuration object */
#define TASK_SEGMENTS	(NUM_TASK_PF_SEGMENTS + NUM_TASK_VF_SEGMENTS)
#define TASK_SEGMENT_VF	(NUM_TASK_PF_SEGMENTS)
242 struct qed_tid_seg {
243 u32 count;
244 u8 type;
245 bool has_fl_mem;
248 struct qed_conn_type_cfg {
249 u32 cid_count;
250 u32 cids_per_vf;
251 struct qed_tid_seg tid_seg[TASK_SEGMENTS];
/* ILT Client configuration,
 * Per connection type (protocol) resources (cids, tis, vf cids etc.)
 * 1 - for connection context (CDUC) and for each task context we need two
 * values, for regular task context and for force load memory
 */
#define ILT_CLI_PF_BLOCKS	(1 + NUM_TASK_PF_SEGMENTS * 2)
#define ILT_CLI_VF_BLOCKS	(1 + NUM_TASK_VF_SEGMENTS * 2)
#define CDUC_BLK		(0)
#define SRQ_BLK			(0)
#define CDUT_SEG_BLK(n)		(1 + (u8)(n))
#define CDUT_FL_SEG_BLK(n, X)	(1 + (n) + NUM_TASK_ ## X ## _SEGMENTS)
266 struct ilt_cfg_pair {
267 u32 reg;
268 u32 val;
271 struct qed_ilt_cli_blk {
272 u32 total_size; /* 0 means not active */
273 u32 real_size_in_page;
274 u32 start_line;
275 u32 dynamic_line_offset;
276 u32 dynamic_line_cnt;
279 struct qed_ilt_client_cfg {
280 bool active;
282 /* ILT boundaries */
283 struct ilt_cfg_pair first;
284 struct ilt_cfg_pair last;
285 struct ilt_cfg_pair p_size;
287 /* ILT client blocks for PF */
288 struct qed_ilt_cli_blk pf_blks[ILT_CLI_PF_BLOCKS];
289 u32 pf_total_lines;
291 /* ILT client blocks for VFs */
292 struct qed_ilt_cli_blk vf_blks[ILT_CLI_VF_BLOCKS];
293 u32 vf_total_lines;
296 struct qed_cid_acquired_map {
297 u32 start_cid;
298 u32 max_count;
299 unsigned long *cid_map;
302 struct qed_src_t2 {
303 struct phys_mem_desc *dma_mem;
304 u32 num_pages;
305 u64 first_free;
306 u64 last_free;
309 struct qed_cxt_mngr {
310 /* Per protocl configuration */
311 struct qed_conn_type_cfg conn_cfg[MAX_CONN_TYPES];
313 /* computed ILT structure */
314 struct qed_ilt_client_cfg clients[MAX_ILT_CLIENTS];
316 /* Task type sizes */
317 u32 task_type_size[NUM_TASK_TYPES];
319 /* total number of VFs for this hwfn -
320 * ALL VFs are symmetric in terms of HW resources
322 u32 vf_count;
323 u32 first_vf_in_pf;
325 /* Acquired CIDs */
326 struct qed_cid_acquired_map acquired[MAX_CONN_TYPES];
328 struct qed_cid_acquired_map
329 acquired_vf[MAX_CONN_TYPES][MAX_NUM_VFS];
331 /* ILT shadow table */
332 struct phys_mem_desc *ilt_shadow;
333 u32 ilt_shadow_size;
334 u32 pf_start_line;
336 /* Mutex for a dynamic ILT allocation */
337 struct mutex mutex;
339 /* SRC T2 */
340 struct qed_src_t2 src_t2;
342 /* total number of SRQ's for this hwfn */
343 u32 srq_count;
344 u32 xrc_srq_count;
346 /* Maximal number of L2 steering filters */
347 u32 arfs_count;
349 u16 iscsi_task_pages;
350 u16 fcoe_task_pages;
351 u16 roce_task_pages;
352 u16 eth_task_pages;
353 u16 task_ctx_size;
354 u16 conn_ctx_size;
357 u16 qed_get_cdut_num_pf_init_pages(struct qed_hwfn *p_hwfn);
358 u16 qed_get_cdut_num_vf_init_pages(struct qed_hwfn *p_hwfn);
359 u16 qed_get_cdut_num_pf_work_pages(struct qed_hwfn *p_hwfn);
360 u16 qed_get_cdut_num_vf_work_pages(struct qed_hwfn *p_hwfn);
362 u32 qed_cxt_get_ilt_page_size(struct qed_hwfn *p_hwfn,
363 enum ilt_clients ilt_client);
365 u32 qed_cxt_get_total_srq_count(struct qed_hwfn *p_hwfn);
367 #endif