/* This file is part of the Emulex RoCE Device Driver for
 * RoCE (RDMA over Converged Ethernet) adapters.
 * Copyright (C) 2012-2015 Emulex. All rights reserved.
 * EMULEX and SLI are trademarks of Emulex.
 *
 * This software is available to you under a choice of one of two licenses.
 * You may choose to be licensed under the terms of the GNU General Public
 * License (GPL) Version 2, available from the file COPYING in the main
 * directory of this source tree, or the BSD license below:
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright notice,
 *   this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 *   notice, this list of conditions and the following disclaimer in
 *   the documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */
#include <rdma/ib_addr.h>
#include <rdma/ib_pma.h>
#include "ocrdma_stats.h"
static struct dentry *ocrdma_dbgfs_dir;
static int ocrdma_add_stat(char *start, char *pcur,
			   char *name, u64 count)
{
	char buff[128] = {0};
	int cpy_len = 0;

	snprintf(buff, 128, "%s: %llu\n", name, count);
	cpy_len = strlen(buff);

	if (pcur + cpy_len > start + OCRDMA_MAX_DBGFS_MEM) {
		pr_err("%s: No space in stats buff\n", __func__);
		return 0;
	}

	memcpy(pcur, buff, cpy_len);
	return cpy_len;
}
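
/*
 * Usage sketch (illustrative only, not driver code): each formatter below
 * appends one "name: value" line and advances its write cursor by the
 * returned length, e.g.
 *
 *	pcur = stats;
 *	pcur += ocrdma_add_stat(stats, pcur, "active_qps", (u64)nqps);
 *
 * ("active_qps" and nqps are made-up names for this sketch.) On overflow
 * ocrdma_add_stat() returns 0, so a full buffer simply stops growing
 * instead of being overrun.
 */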
bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
{
	struct stats_mem *mem = &dev->stats_mem;

	mutex_init(&dev->stats_lock);

	/* Alloc mbox command mem */
	mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
			  sizeof(struct ocrdma_rdma_stats_resp));

	mem->va = dma_alloc_coherent(&dev->nic_info.pdev->dev, mem->size,
				     &mem->pa, GFP_KERNEL);
	if (!mem->va) {
		pr_err("%s: stats mbox allocation failed\n", __func__);
		return false;
	}

	memset(mem->va, 0, mem->size);

	/* Alloc debugfs mem */
	mem->debugfs_mem = kzalloc(OCRDMA_MAX_DBGFS_MEM, GFP_KERNEL);
	if (!mem->debugfs_mem) {
		pr_err("%s: stats debugfs mem allocation failed\n", __func__);
		return false;
	}

	return true;
}
void ocrdma_release_stats_resources(struct ocrdma_dev *dev)
{
	struct stats_mem *mem = &dev->stats_mem;

	if (mem->va)
		dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
				  mem->va, mem->pa);
	kfree(mem->debugfs_mem);
}
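
/*
 * Lifetime sketch (illustrative): a caller is expected to bracket the
 * device lifetime with this pair, roughly
 *
 *	if (!ocrdma_alloc_stats_resources(dev))
 *		return -ENOMEM;
 *	...
 *	ocrdma_release_stats_resources(dev);
 *
 * Since kfree(NULL) is a no-op and the DMA buffer is freed only when
 * mem->va is set, the release helper is also safe to call after a
 * partial allocation failure.
 */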
static char *ocrdma_resource_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "active_dpp_pds",
				(u64)rsrc_stats->dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "active_non_dpp_pds",
				(u64)rsrc_stats->non_dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "active_rc_dpp_qps",
				(u64)rsrc_stats->rc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_uc_dpp_qps",
				(u64)rsrc_stats->uc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ud_dpp_qps",
				(u64)rsrc_stats->ud_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_rc_non_dpp_qps",
				(u64)rsrc_stats->rc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_uc_non_dpp_qps",
				(u64)rsrc_stats->uc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ud_non_dpp_qps",
				(u64)rsrc_stats->ud_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_srqs",
				(u64)rsrc_stats->srqs);
	pcur += ocrdma_add_stat(stats, pcur, "active_rbqs",
				(u64)rsrc_stats->rbqs);
	pcur += ocrdma_add_stat(stats, pcur, "active_64K_nsmr",
				(u64)rsrc_stats->r64K_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_64K_to_2M_nsmr",
				(u64)rsrc_stats->r64K_to_2M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_2M_to_44M_nsmr",
				(u64)rsrc_stats->r2M_to_44M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_44M_to_1G_nsmr",
				(u64)rsrc_stats->r44M_to_1G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_1G_to_4G_nsmr",
				(u64)rsrc_stats->r1G_to_4G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_nsmr_count_4G_to_32G",
				(u64)rsrc_stats->nsmr_count_4G_to_32G);
	pcur += ocrdma_add_stat(stats, pcur, "active_32G_to_64G_nsmr",
				(u64)rsrc_stats->r32G_to_64G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_64G_to_128G_nsmr",
				(u64)rsrc_stats->r64G_to_128G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_128G_to_higher_nsmr",
				(u64)rsrc_stats->r128G_to_higher_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_embedded_nsmr",
				(u64)rsrc_stats->embedded_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_frmr",
				(u64)rsrc_stats->frmr);
	pcur += ocrdma_add_stat(stats, pcur, "active_prefetch_qps",
				(u64)rsrc_stats->prefetch_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_ondemand_qps",
				(u64)rsrc_stats->ondemand_qps);
	pcur += ocrdma_add_stat(stats, pcur, "active_phy_mr",
				(u64)rsrc_stats->phy_mr);
	pcur += ocrdma_add_stat(stats, pcur, "active_mw",
				(u64)rsrc_stats->mw);

	/* Print the threshold stats */
	rsrc_stats = &rdma_stats->th_rsrc_stats;

	pcur += ocrdma_add_stat(stats, pcur, "threshold_dpp_pds",
				(u64)rsrc_stats->dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_non_dpp_pds",
				(u64)rsrc_stats->non_dpp_pds);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_dpp_qps",
				(u64)rsrc_stats->rc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_dpp_qps",
				(u64)rsrc_stats->uc_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_dpp_qps",
				(u64)rsrc_stats->ud_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rc_non_dpp_qps",
				(u64)rsrc_stats->rc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_uc_non_dpp_qps",
				(u64)rsrc_stats->uc_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ud_non_dpp_qps",
				(u64)rsrc_stats->ud_non_dpp_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_srqs",
				(u64)rsrc_stats->srqs);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_rbqs",
				(u64)rsrc_stats->rbqs);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_nsmr",
				(u64)rsrc_stats->r64K_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64K_to_2M_nsmr",
				(u64)rsrc_stats->r64K_to_2M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_2M_to_44M_nsmr",
				(u64)rsrc_stats->r2M_to_44M_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_44M_to_1G_nsmr",
				(u64)rsrc_stats->r44M_to_1G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_1G_to_4G_nsmr",
				(u64)rsrc_stats->r1G_to_4G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_nsmr_count_4G_to_32G",
				(u64)rsrc_stats->nsmr_count_4G_to_32G);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_32G_to_64G_nsmr",
				(u64)rsrc_stats->r32G_to_64G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_64G_to_128G_nsmr",
				(u64)rsrc_stats->r64G_to_128G_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_128G_to_higher_nsmr",
				(u64)rsrc_stats->r128G_to_higher_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_embedded_nsmr",
				(u64)rsrc_stats->embedded_nsmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_frmr",
				(u64)rsrc_stats->frmr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_prefetch_qps",
				(u64)rsrc_stats->prefetch_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_ondemand_qps",
				(u64)rsrc_stats->ondemand_qps);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_phy_mr",
				(u64)rsrc_stats->phy_mr);
	pcur += ocrdma_add_stat(stats, pcur, "threshold_mw",
				(u64)rsrc_stats->mw);

	return stats;
}
static char *ocrdma_rx_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_bytes",
				convert_to_64bit(rx_stats->roce_frame_bytes_lo,
						 rx_stats->roce_frame_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_icrc_drops",
				(u64)rx_stats->roce_frame_icrc_drops);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_payload_len_drops",
				(u64)rx_stats->roce_frame_payload_len_drops);
	pcur += ocrdma_add_stat(stats, pcur, "ud_drops",
				(u64)rx_stats->ud_drops);
	pcur += ocrdma_add_stat(stats, pcur, "qp1_drops",
				(u64)rx_stats->qp1_drops);
	pcur += ocrdma_add_stat(stats, pcur, "psn_error_request_packets",
				(u64)rx_stats->psn_error_request_packets);
	pcur += ocrdma_add_stat(stats, pcur, "psn_error_resp_packets",
				(u64)rx_stats->psn_error_resp_packets);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_timeouts",
				(u64)rx_stats->rnr_nak_timeouts);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_nak_receives",
				(u64)rx_stats->rnr_nak_receives);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frame_rxmt_drops",
				(u64)rx_stats->roce_frame_rxmt_drops);
	pcur += ocrdma_add_stat(stats, pcur, "nak_count_psn_sequence_errors",
				(u64)rx_stats->nak_count_psn_sequence_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rc_drop_count_lookup_errors",
				(u64)rx_stats->rc_drop_count_lookup_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rq_rnr_naks",
				(u64)rx_stats->rq_rnr_naks);
	pcur += ocrdma_add_stat(stats, pcur, "srq_rnr_naks",
				(u64)rx_stats->srq_rnr_naks);
	pcur += ocrdma_add_stat(stats, pcur, "roce_frames",
				convert_to_64bit(rx_stats->roce_frames_lo,
						 rx_stats->roce_frames_hi));

	return stats;
}
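
/*
 * Note on convert_to_64bit(): the firmware reports wide counters as
 * 32-bit lo/hi halves, and the helper (from ocrdma_stats.h) recombines
 * them, conceptually:
 *
 *	static inline u64 lo_hi_to_u64(u32 lo, u32 hi)
 *	{
 *		return ((u64)hi << 32) | lo;
 *	}
 *
 * (lo_hi_to_u64 is an illustrative name for this sketch, not a driver
 * symbol.)
 */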
static u64 ocrdma_sysfs_rcv_pkts(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	return convert_to_64bit(rx_stats->roce_frames_lo,
				rx_stats->roce_frames_hi) +
		(u64)rx_stats->roce_frame_icrc_drops +
		(u64)rx_stats->roce_frame_payload_len_drops;
}
static u64 ocrdma_sysfs_rcv_data(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_stats *rx_stats = &rdma_stats->rx_stats;

	return (convert_to_64bit(rx_stats->roce_frame_bytes_lo,
				 rx_stats->roce_frame_bytes_hi))/4;
}
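
/*
 * The divide by four above is not arbitrary: the IBA performance
 * management attributes express PortRcvData/PortXmitData in units of
 * 32-bit words rather than octets, so the byte counters from firmware
 * are scaled down before being reported via ocrdma_pma_counters().
 * The same scaling appears in ocrdma_sysfs_xmit_data() below.
 */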
static char *ocrdma_tx_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "send_pkts",
				convert_to_64bit(tx_stats->send_pkts_lo,
						 tx_stats->send_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "write_pkts",
				convert_to_64bit(tx_stats->write_pkts_lo,
						 tx_stats->write_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_pkts",
				convert_to_64bit(tx_stats->read_pkts_lo,
						 tx_stats->read_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_pkts",
				convert_to_64bit(tx_stats->read_rsp_pkts_lo,
						 tx_stats->read_rsp_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "ack_pkts",
				convert_to_64bit(tx_stats->ack_pkts_lo,
						 tx_stats->ack_pkts_hi));
	pcur += ocrdma_add_stat(stats, pcur, "send_bytes",
				convert_to_64bit(tx_stats->send_bytes_lo,
						 tx_stats->send_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "write_bytes",
				convert_to_64bit(tx_stats->write_bytes_lo,
						 tx_stats->write_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_req_bytes",
				convert_to_64bit(tx_stats->read_req_bytes_lo,
						 tx_stats->read_req_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_rsp_bytes",
				convert_to_64bit(tx_stats->read_rsp_bytes_lo,
						 tx_stats->read_rsp_bytes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "ack_timeouts",
				(u64)tx_stats->ack_timeouts);

	return stats;
}
static u64 ocrdma_sysfs_xmit_pkts(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	return (convert_to_64bit(tx_stats->send_pkts_lo,
				 tx_stats->send_pkts_hi) +
		convert_to_64bit(tx_stats->write_pkts_lo, tx_stats->write_pkts_hi) +
		convert_to_64bit(tx_stats->read_pkts_lo, tx_stats->read_pkts_hi) +
		convert_to_64bit(tx_stats->read_rsp_pkts_lo,
				 tx_stats->read_rsp_pkts_hi) +
		convert_to_64bit(tx_stats->ack_pkts_lo, tx_stats->ack_pkts_hi));
}
static u64 ocrdma_sysfs_xmit_data(struct ocrdma_dev *dev)
{
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_stats *tx_stats = &rdma_stats->tx_stats;

	return (convert_to_64bit(tx_stats->send_bytes_lo,
				 tx_stats->send_bytes_hi) +
		convert_to_64bit(tx_stats->write_bytes_lo,
				 tx_stats->write_bytes_hi) +
		convert_to_64bit(tx_stats->read_req_bytes_lo,
				 tx_stats->read_req_bytes_hi) +
		convert_to_64bit(tx_stats->read_rsp_bytes_lo,
				 tx_stats->read_rsp_bytes_hi))/4;
}
static char *ocrdma_wqe_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_wqe_stats *wqe_stats = &rdma_stats->wqe_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "large_send_rc_wqes",
				convert_to_64bit(wqe_stats->large_send_rc_wqes_lo,
						 wqe_stats->large_send_rc_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "large_write_rc_wqes",
				convert_to_64bit(wqe_stats->large_write_rc_wqes_lo,
						 wqe_stats->large_write_rc_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "read_wqes",
				convert_to_64bit(wqe_stats->read_wqes_lo,
						 wqe_stats->read_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "frmr_wqes",
				convert_to_64bit(wqe_stats->frmr_wqes_lo,
						 wqe_stats->frmr_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "mw_bind_wqes",
				convert_to_64bit(wqe_stats->mw_bind_wqes_lo,
						 wqe_stats->mw_bind_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "invalidate_wqes",
				convert_to_64bit(wqe_stats->invalidate_wqes_lo,
						 wqe_stats->invalidate_wqes_hi));
	pcur += ocrdma_add_stat(stats, pcur, "dpp_wqe_drops",
				(u64)wqe_stats->dpp_wqe_drops);

	return stats;
}
static char *ocrdma_db_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_db_err_stats *db_err_stats = &rdma_stats->db_err_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "sq_doorbell_errors",
				(u64)db_err_stats->sq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "cq_doorbell_errors",
				(u64)db_err_stats->cq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rq_srq_doorbell_errors",
				(u64)db_err_stats->rq_srq_doorbell_errors);
	pcur += ocrdma_add_stat(stats, pcur, "cq_overflow_errors",
				(u64)db_err_stats->cq_overflow_errors);

	return stats;
}
static char *ocrdma_rxqp_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_qp_err_stats *rx_qp_err_stats =
		&rdma_stats->rx_qp_err_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "nak_invalid_requst_errors",
				(u64)rx_qp_err_stats->nak_invalid_requst_errors);
	pcur += ocrdma_add_stat(stats, pcur, "nak_remote_operation_errors",
				(u64)rx_qp_err_stats->nak_remote_operation_errors);
	pcur += ocrdma_add_stat(stats, pcur, "nak_count_remote_access_errors",
				(u64)rx_qp_err_stats->nak_count_remote_access_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
				(u64)rx_qp_err_stats->local_length_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
				(u64)rx_qp_err_stats->local_protection_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
				(u64)rx_qp_err_stats->local_qp_operation_errors);

	return stats;
}
static char *ocrdma_txqp_errstats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_qp_err_stats *tx_qp_err_stats =
		&rdma_stats->tx_qp_err_stats;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "local_length_errors",
				(u64)tx_qp_err_stats->local_length_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_protection_errors",
				(u64)tx_qp_err_stats->local_protection_errors);
	pcur += ocrdma_add_stat(stats, pcur, "local_qp_operation_errors",
				(u64)tx_qp_err_stats->local_qp_operation_errors);
	pcur += ocrdma_add_stat(stats, pcur, "retry_count_exceeded_errors",
				(u64)tx_qp_err_stats->retry_count_exceeded_errors);
	pcur += ocrdma_add_stat(stats, pcur, "rnr_retry_count_exceeded_errors",
				(u64)tx_qp_err_stats->rnr_retry_count_exceeded_errors);

	return stats;
}
static char *ocrdma_tx_dbg_stats(struct ocrdma_dev *dev)
{
	int i;
	char *pstats = dev->stats_mem.debugfs_mem;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_tx_dbg_stats *tx_dbg_stats =
		&rdma_stats->tx_dbg_stats;

	memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));

	for (i = 0; i < 100; i++)
		pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
				   tx_dbg_stats->data[i]);

	return dev->stats_mem.debugfs_mem;
}
static char *ocrdma_rx_dbg_stats(struct ocrdma_dev *dev)
{
	int i;
	char *pstats = dev->stats_mem.debugfs_mem;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rx_dbg_stats *rx_dbg_stats =
		&rdma_stats->rx_dbg_stats;

	memset(pstats, 0, (OCRDMA_MAX_DBGFS_MEM));

	for (i = 0; i < 200; i++)
		pstats += snprintf(pstats, 80, "DW[%d] = 0x%x\n", i,
				   rx_dbg_stats->data[i]);

	return dev->stats_mem.debugfs_mem;
}
static char *ocrdma_driver_dbg_stats(struct ocrdma_dev *dev)
{
	char *stats = dev->stats_mem.debugfs_mem, *pcur;

	memset(stats, 0, (OCRDMA_MAX_DBGFS_MEM));

	pcur = stats;
	pcur += ocrdma_add_stat(stats, pcur, "async_cq_err",
				(u64)(dev->async_err_stats
				[OCRDMA_CQ_ERROR].counter));
	pcur += ocrdma_add_stat(stats, pcur, "async_cq_overrun_err",
				(u64)dev->async_err_stats
				[OCRDMA_CQ_OVERRUN_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_cq_qpcat_err",
				(u64)dev->async_err_stats
				[OCRDMA_CQ_QPCAT_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_qp_access_err",
				(u64)dev->async_err_stats
				[OCRDMA_QP_ACCESS_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_qp_commm_est_evt",
				(u64)dev->async_err_stats
				[OCRDMA_QP_COMM_EST_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_sq_drained_evt",
				(u64)dev->async_err_stats
				[OCRDMA_SQ_DRAINED_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_dev_fatal_evt",
				(u64)dev->async_err_stats
				[OCRDMA_DEVICE_FATAL_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_srqcat_err",
				(u64)dev->async_err_stats
				[OCRDMA_SRQCAT_ERROR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_srq_limit_evt",
				(u64)dev->async_err_stats
				[OCRDMA_SRQ_LIMIT_EVENT].counter);
	pcur += ocrdma_add_stat(stats, pcur, "async_qp_last_wqe_evt",
				(u64)dev->async_err_stats
				[OCRDMA_QP_LAST_WQE_EVENT].counter);

	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_len_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_LEN_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_qp_op_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_QP_OP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_eec_op_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_EEC_OP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_prot_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_PROT_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_wr_flush_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_WR_FLUSH_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_mw_bind_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_MW_BIND_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_bad_resp_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_BAD_RESP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_access_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_ACCESS_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_req_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_INV_REQ_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_access_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_ACCESS_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_op_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_OP_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_retry_exc_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_RETRY_EXC_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rnr_retry_exc_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_RNR_RETRY_EXC_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_loc_rdd_viol_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_LOC_RDD_VIOL_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_inv_rd_req_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_INV_RD_REQ_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_rem_abort_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_REM_ABORT_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eecn_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_INV_EECN_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_inv_eec_state_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_INV_EEC_STATE_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_fatal_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_FATAL_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_resp_timeout_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_RESP_TIMEOUT_ERR].counter);
	pcur += ocrdma_add_stat(stats, pcur, "cqe_general_err",
				(u64)dev->cqe_err_stats
				[OCRDMA_CQE_GENERAL_ERR].counter);

	return stats;
}
static void ocrdma_update_stats(struct ocrdma_dev *dev)
{
	ulong now = jiffies, secs;
	int status;
	struct ocrdma_rdma_stats_resp *rdma_stats =
		(struct ocrdma_rdma_stats_resp *)dev->stats_mem.va;
	struct ocrdma_rsrc_stats *rsrc_stats = &rdma_stats->act_rsrc_stats;

	secs = jiffies_to_msecs(now - dev->last_stats_time) / 1000U;
	if (secs) {
		/* Refresh the firmware snapshot at most once per second */
		status = ocrdma_mbx_rdma_stats(dev, false);
		if (status)
			pr_err("%s: stats mbox failed with status = %d\n",
			       __func__, status);
		/* Update PD counters from PD resource manager */
		if (dev->pd_mgr->pd_prealloc_valid) {
			rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_count;
			rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_count;
			/* Threshold stats */
			rsrc_stats = &rdma_stats->th_rsrc_stats;
			rsrc_stats->dpp_pds = dev->pd_mgr->pd_dpp_thrsh;
			rsrc_stats->non_dpp_pds = dev->pd_mgr->pd_norm_thrsh;
		}
		dev->last_stats_time = jiffies;
	}
}
static ssize_t ocrdma_dbgfs_ops_write(struct file *filp,
				      const char __user *buffer,
				      size_t count, loff_t *ppos)
{
	char tmp_str[32];
	long reset;
	int status;
	struct ocrdma_stats *pstats = filp->private_data;
	struct ocrdma_dev *dev = pstats->dev;

	if (*ppos != 0 || count == 0 || count > sizeof(tmp_str))
		goto err;

	memset(tmp_str, 0, sizeof(tmp_str));
	if (copy_from_user(tmp_str, buffer, count))
		goto err;

	tmp_str[count-1] = '\0';
	if (kstrtol(tmp_str, 10, &reset))
		goto err;

	switch (pstats->type) {
	case OCRDMA_RESET_STATS:
		if (reset) {
			status = ocrdma_mbx_rdma_stats(dev, true);
			if (status) {
				pr_err("Failed to reset stats = %d", status);
				goto err;
			}
		}
		break;
	default:
		goto err;
	}

	return count;
err:
	return -EFAULT;
}
int ocrdma_pma_counters(struct ocrdma_dev *dev,
			struct ib_mad *out_mad)
{
	struct ib_pma_portcounters *pma_cnt;

	memset(out_mad->data, 0, sizeof out_mad->data);
	pma_cnt = (void *)(out_mad->data + 40);
	ocrdma_update_stats(dev);

	pma_cnt->port_xmit_data = cpu_to_be32(ocrdma_sysfs_xmit_data(dev));
	pma_cnt->port_rcv_data = cpu_to_be32(ocrdma_sysfs_rcv_data(dev));
	pma_cnt->port_xmit_packets = cpu_to_be32(ocrdma_sysfs_xmit_pkts(dev));
	pma_cnt->port_rcv_packets = cpu_to_be32(ocrdma_sysfs_rcv_pkts(dev));

	return 0;
}
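
/*
 * Layout note (sketch): in a PerfMgt MAD the PortCounters attribute data
 * begins at byte 64 of the MAD, i.e. 40 bytes into out_mad->data, which
 * itself follows the 24-byte common MAD header; hence the "+ 40" above.
 */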
static ssize_t ocrdma_dbgfs_ops_read(struct file *filp, char __user *buffer,
				     size_t usr_buf_len, loff_t *ppos)
{
	struct ocrdma_stats *pstats = filp->private_data;
	struct ocrdma_dev *dev = pstats->dev;
	ssize_t status = 0;
	char *data = NULL;

	/* No partial reads */
	if (*ppos != 0)
		return 0;

	mutex_lock(&dev->stats_lock);

	ocrdma_update_stats(dev);

	switch (pstats->type) {
	case OCRDMA_RSRC_STATS:
		data = ocrdma_resource_stats(dev);
		break;
	case OCRDMA_RXSTATS:
		data = ocrdma_rx_stats(dev);
		break;
	case OCRDMA_WQESTATS:
		data = ocrdma_wqe_stats(dev);
		break;
	case OCRDMA_TXSTATS:
		data = ocrdma_tx_stats(dev);
		break;
	case OCRDMA_DB_ERRSTATS:
		data = ocrdma_db_errstats(dev);
		break;
	case OCRDMA_RXQP_ERRSTATS:
		data = ocrdma_rxqp_errstats(dev);
		break;
	case OCRDMA_TXQP_ERRSTATS:
		data = ocrdma_txqp_errstats(dev);
		break;
	case OCRDMA_TX_DBG_STATS:
		data = ocrdma_tx_dbg_stats(dev);
		break;
	case OCRDMA_RX_DBG_STATS:
		data = ocrdma_rx_dbg_stats(dev);
		break;
	case OCRDMA_DRV_STATS:
		data = ocrdma_driver_dbg_stats(dev);
		break;
	default:
		status = -EFAULT;
		goto exit;
	}

	if (usr_buf_len < strlen(data)) {
		status = -ENOSPC;
		goto exit;
	}

	status = simple_read_from_buffer(buffer, usr_buf_len, ppos, data,
					 strlen(data));
exit:
	mutex_unlock(&dev->stats_lock);
	return status;
}
static const struct file_operations ocrdma_dbg_ops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = ocrdma_dbgfs_ops_read,
	.write = ocrdma_dbgfs_ops_write,
};
void ocrdma_add_port_stats(struct ocrdma_dev *dev)
{
	if (!ocrdma_dbgfs_dir)
		return;

	/* Create port stats base dir */
	dev->dir = debugfs_create_dir(dev->ibdev.name, ocrdma_dbgfs_dir);
	if (!dev->dir)
		goto err;

	dev->rsrc_stats.type = OCRDMA_RSRC_STATS;
	dev->rsrc_stats.dev = dev;
	if (!debugfs_create_file("resource_stats", S_IRUSR, dev->dir,
				 &dev->rsrc_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_stats.type = OCRDMA_RXSTATS;
	dev->rx_stats.dev = dev;
	if (!debugfs_create_file("rx_stats", S_IRUSR, dev->dir,
				 &dev->rx_stats, &ocrdma_dbg_ops))
		goto err;

	dev->wqe_stats.type = OCRDMA_WQESTATS;
	dev->wqe_stats.dev = dev;
	if (!debugfs_create_file("wqe_stats", S_IRUSR, dev->dir,
				 &dev->wqe_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_stats.type = OCRDMA_TXSTATS;
	dev->tx_stats.dev = dev;
	if (!debugfs_create_file("tx_stats", S_IRUSR, dev->dir,
				 &dev->tx_stats, &ocrdma_dbg_ops))
		goto err;

	dev->db_err_stats.type = OCRDMA_DB_ERRSTATS;
	dev->db_err_stats.dev = dev;
	if (!debugfs_create_file("db_err_stats", S_IRUSR, dev->dir,
				 &dev->db_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_qp_err_stats.type = OCRDMA_TXQP_ERRSTATS;
	dev->tx_qp_err_stats.dev = dev;
	if (!debugfs_create_file("tx_qp_err_stats", S_IRUSR, dev->dir,
				 &dev->tx_qp_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_qp_err_stats.type = OCRDMA_RXQP_ERRSTATS;
	dev->rx_qp_err_stats.dev = dev;
	if (!debugfs_create_file("rx_qp_err_stats", S_IRUSR, dev->dir,
				 &dev->rx_qp_err_stats, &ocrdma_dbg_ops))
		goto err;

	dev->tx_dbg_stats.type = OCRDMA_TX_DBG_STATS;
	dev->tx_dbg_stats.dev = dev;
	if (!debugfs_create_file("tx_dbg_stats", S_IRUSR, dev->dir,
				 &dev->tx_dbg_stats, &ocrdma_dbg_ops))
		goto err;

	dev->rx_dbg_stats.type = OCRDMA_RX_DBG_STATS;
	dev->rx_dbg_stats.dev = dev;
	if (!debugfs_create_file("rx_dbg_stats", S_IRUSR, dev->dir,
				 &dev->rx_dbg_stats, &ocrdma_dbg_ops))
		goto err;

	dev->driver_stats.type = OCRDMA_DRV_STATS;
	dev->driver_stats.dev = dev;
	if (!debugfs_create_file("driver_dbg_stats", S_IRUSR, dev->dir,
				 &dev->driver_stats, &ocrdma_dbg_ops))
		goto err;

	dev->reset_stats.type = OCRDMA_RESET_STATS;
	dev->reset_stats.dev = dev;
	if (!debugfs_create_file("reset_stats", S_IRUSR, dev->dir,
				 &dev->reset_stats, &ocrdma_dbg_ops))
		goto err;

	return;
err:
	debugfs_remove_recursive(dev->dir);
	dev->dir = NULL;
}
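
/*
 * Resulting layout (sketch, assuming debugfs is mounted at the usual
 * /sys/kernel/debug):
 *
 *	/sys/kernel/debug/ocrdma/<ibdev name>/resource_stats
 *	/sys/kernel/debug/ocrdma/<ibdev name>/rx_stats
 *	...
 *	/sys/kernel/debug/ocrdma/<ibdev name>/reset_stats
 *
 * Reading any of these files lands in ocrdma_dbgfs_ops_read(), which
 * refreshes the firmware snapshot before formatting it.
 */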
void ocrdma_rem_port_stats(struct ocrdma_dev *dev)
{
	if (!dev->dir)
		return;
	debugfs_remove_recursive(dev->dir);
}
void ocrdma_init_debugfs(void)
{
	/* Create base dir in debugfs root dir */
	ocrdma_dbgfs_dir = debugfs_create_dir("ocrdma", NULL);
}

void ocrdma_rem_debugfs(void)
{
	debugfs_remove_recursive(ocrdma_dbgfs_dir);
}