/*
 * Copyright (c) 2013-2015, Mellanox Technologies, Ltd.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/mlx5/driver.h>
#include "wq.h"
#include "mlx5_core.h"
37 static u32
wq_get_byte_sz(u8 log_sz
, u8 log_stride
)
39 return ((u32
)1 << log_sz
) << log_stride
;
42 int mlx5_wq_cyc_create(struct mlx5_core_dev
*mdev
, struct mlx5_wq_param
*param
,
43 void *wqc
, struct mlx5_wq_cyc
*wq
,
44 struct mlx5_wq_ctrl
*wq_ctrl
)
46 u8 log_wq_stride
= MLX5_GET(wq
, wqc
, log_wq_stride
);
47 u8 log_wq_sz
= MLX5_GET(wq
, wqc
, log_wq_sz
);
48 struct mlx5_frag_buf_ctrl
*fbc
= &wq
->fbc
;
51 err
= mlx5_db_alloc_node(mdev
, &wq_ctrl
->db
, param
->db_numa_node
);
53 mlx5_core_warn(mdev
, "mlx5_db_alloc_node() failed, %d\n", err
);
57 wq
->db
= wq_ctrl
->db
.db
;
59 err
= mlx5_frag_buf_alloc_node(mdev
, wq_get_byte_sz(log_wq_sz
, log_wq_stride
),
60 &wq_ctrl
->buf
, param
->buf_numa_node
);
62 mlx5_core_warn(mdev
, "mlx5_frag_buf_alloc_node() failed, %d\n", err
);
66 mlx5_init_fbc(wq_ctrl
->buf
.frags
, log_wq_stride
, log_wq_sz
, fbc
);
67 wq
->sz
= mlx5_wq_cyc_get_size(wq
);
74 mlx5_db_free(mdev
, &wq_ctrl
->db
);
79 void mlx5_wq_cyc_wqe_dump(struct mlx5_wq_cyc
*wq
, u16 ix
, u8 nstrides
)
87 nstrides
= max_t(u8
, nstrides
, 1);
89 len
= nstrides
<< wq
->fbc
.log_stride
;
90 wqe
= mlx5_wq_cyc_get_wqe(wq
, ix
);
92 pr_info("WQE DUMP: WQ size %d WQ cur size %d, WQE index 0x%x, len: %zu\n",
93 mlx5_wq_cyc_get_size(wq
), wq
->cur_sz
, ix
, len
);
94 print_hex_dump(KERN_WARNING
, "", DUMP_PREFIX_OFFSET
, 16, 1, wqe
, len
, false);
97 void mlx5_wq_cyc_reset(struct mlx5_wq_cyc
*wq
)
101 mlx5_wq_cyc_update_db_record(wq
);
104 int mlx5_wq_qp_create(struct mlx5_core_dev
*mdev
, struct mlx5_wq_param
*param
,
105 void *qpc
, struct mlx5_wq_qp
*wq
,
106 struct mlx5_wq_ctrl
*wq_ctrl
)
108 u8 log_rq_stride
= MLX5_GET(qpc
, qpc
, log_rq_stride
) + 4;
109 u8 log_rq_sz
= MLX5_GET(qpc
, qpc
, log_rq_size
);
110 u8 log_sq_stride
= ilog2(MLX5_SEND_WQE_BB
);
111 u8 log_sq_sz
= MLX5_GET(qpc
, qpc
, log_sq_size
);
118 err
= mlx5_db_alloc_node(mdev
, &wq_ctrl
->db
, param
->db_numa_node
);
120 mlx5_core_warn(mdev
, "mlx5_db_alloc_node() failed, %d\n", err
);
124 err
= mlx5_frag_buf_alloc_node(mdev
,
125 wq_get_byte_sz(log_rq_sz
, log_rq_stride
) +
126 wq_get_byte_sz(log_sq_sz
, log_sq_stride
),
127 &wq_ctrl
->buf
, param
->buf_numa_node
);
129 mlx5_core_warn(mdev
, "mlx5_frag_buf_alloc_node() failed, %d\n", err
);
133 mlx5_init_fbc(wq_ctrl
->buf
.frags
, log_rq_stride
, log_rq_sz
, &wq
->rq
.fbc
);
135 rq_byte_size
= wq_get_byte_sz(log_rq_sz
, log_rq_stride
);
137 if (rq_byte_size
< PAGE_SIZE
) {
138 /* SQ starts within the same page of the RQ */
139 u16 sq_strides_offset
= rq_byte_size
/ MLX5_SEND_WQE_BB
;
141 mlx5_init_fbc_offset(wq_ctrl
->buf
.frags
,
142 log_sq_stride
, log_sq_sz
, sq_strides_offset
,
145 u16 rq_npages
= rq_byte_size
>> PAGE_SHIFT
;
147 mlx5_init_fbc(wq_ctrl
->buf
.frags
+ rq_npages
,
148 log_sq_stride
, log_sq_sz
, &wq
->sq
.fbc
);
151 wq
->rq
.db
= &wq_ctrl
->db
.db
[MLX5_RCV_DBR
];
152 wq
->sq
.db
= &wq_ctrl
->db
.db
[MLX5_SND_DBR
];
154 wq_ctrl
->mdev
= mdev
;
159 mlx5_db_free(mdev
, &wq_ctrl
->db
);
164 int mlx5_cqwq_create(struct mlx5_core_dev
*mdev
, struct mlx5_wq_param
*param
,
165 void *cqc
, struct mlx5_cqwq
*wq
,
166 struct mlx5_wq_ctrl
*wq_ctrl
)
168 /* CQE_STRIDE_128 and CQE_STRIDE_128_PAD both mean 128B stride */
169 u8 log_wq_stride
= MLX5_GET(cqc
, cqc
, cqe_sz
) == CQE_STRIDE_64
? 6 : 7;
170 u8 log_wq_sz
= MLX5_GET(cqc
, cqc
, log_cq_size
);
173 err
= mlx5_db_alloc_node(mdev
, &wq_ctrl
->db
, param
->db_numa_node
);
175 mlx5_core_warn(mdev
, "mlx5_db_alloc_node() failed, %d\n", err
);
179 wq
->db
= wq_ctrl
->db
.db
;
181 err
= mlx5_frag_buf_alloc_node(mdev
, wq_get_byte_sz(log_wq_sz
, log_wq_stride
),
183 param
->buf_numa_node
);
185 mlx5_core_warn(mdev
, "mlx5_frag_buf_alloc_node() failed, %d\n",
190 mlx5_init_fbc(wq_ctrl
->buf
.frags
, log_wq_stride
, log_wq_sz
, &wq
->fbc
);
192 wq_ctrl
->mdev
= mdev
;
197 mlx5_db_free(mdev
, &wq_ctrl
->db
);
202 static void mlx5_wq_ll_init_list(struct mlx5_wq_ll
*wq
)
204 struct mlx5_wqe_srq_next_seg
*next_seg
;
207 for (i
= 0; i
< wq
->fbc
.sz_m1
; i
++) {
208 next_seg
= mlx5_wq_ll_get_wqe(wq
, i
);
209 next_seg
->next_wqe_index
= cpu_to_be16(i
+ 1);
211 next_seg
= mlx5_wq_ll_get_wqe(wq
, i
);
212 wq
->tail_next
= &next_seg
->next_wqe_index
;
215 int mlx5_wq_ll_create(struct mlx5_core_dev
*mdev
, struct mlx5_wq_param
*param
,
216 void *wqc
, struct mlx5_wq_ll
*wq
,
217 struct mlx5_wq_ctrl
*wq_ctrl
)
219 u8 log_wq_stride
= MLX5_GET(wq
, wqc
, log_wq_stride
);
220 u8 log_wq_sz
= MLX5_GET(wq
, wqc
, log_wq_sz
);
221 struct mlx5_frag_buf_ctrl
*fbc
= &wq
->fbc
;
224 err
= mlx5_db_alloc_node(mdev
, &wq_ctrl
->db
, param
->db_numa_node
);
226 mlx5_core_warn(mdev
, "mlx5_db_alloc_node() failed, %d\n", err
);
230 wq
->db
= wq_ctrl
->db
.db
;
232 err
= mlx5_frag_buf_alloc_node(mdev
, wq_get_byte_sz(log_wq_sz
, log_wq_stride
),
233 &wq_ctrl
->buf
, param
->buf_numa_node
);
235 mlx5_core_warn(mdev
, "mlx5_frag_buf_alloc_node() failed, %d\n", err
);
239 mlx5_init_fbc(wq_ctrl
->buf
.frags
, log_wq_stride
, log_wq_sz
, fbc
);
241 mlx5_wq_ll_init_list(wq
);
242 wq_ctrl
->mdev
= mdev
;
247 mlx5_db_free(mdev
, &wq_ctrl
->db
);
252 void mlx5_wq_ll_reset(struct mlx5_wq_ll
*wq
)
257 mlx5_wq_ll_init_list(wq
);
258 mlx5_wq_ll_update_db_record(wq
);
261 void mlx5_wq_destroy(struct mlx5_wq_ctrl
*wq_ctrl
)
263 mlx5_frag_buf_free(wq_ctrl
->mdev
, &wq_ctrl
->buf
);
264 mlx5_db_free(wq_ctrl
->mdev
, &wq_ctrl
->db
);