// SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
/*
 * Copyright (c) 2019-2020, Mellanox Technologies Ltd. All rights reserved.
 */

#include <uapi/rdma/rdma_netlink.h>
#include <linux/mlx5/rsc_dump.h>
#include <rdma/ib_umem_odp.h>
#include <rdma/restrack.h>
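/*
 * Local driver headers (assumed here): to_mmr(), to_mdev() and the
 * mlx5_ib_restrack_init() prototype come from the mlx5_ib driver itself.
 */
#include "mlx5_ib.h"
#include "restrack.h"

/* Upper bound, in bytes, on the raw dump copied into a netlink reply. */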
#define MAX_DUMP_SIZE 1024
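
/*
 * Read the device resource dump for the object selected by @type/@index
 * into @data, page by page, bailing out if it would exceed MAX_DUMP_SIZE.
 */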
static int dump_rsc(struct mlx5_core_dev *dev, enum mlx5_sgmt_type type,
		    int index, void *data, int *data_len)
{
	struct mlx5_core_dev *mdev = dev;
	struct mlx5_rsc_dump_cmd *cmd;
	struct mlx5_rsc_key key = {};
	struct page *page;
	int offset = 0;
	int cmd_err;
	int size;
	int err = 0;

	page = alloc_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;
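
	/*
	 * Key layout assumed from the mlx5 rsc_dump API: dump a single object
	 * of segment @type at @index, fetching up to one page per iteration.
	 */
	key.rsc = type;
	key.index1 = index;
	key.num_of_obj1 = 1;
	key.size = PAGE_SIZE;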

	cmd = mlx5_rsc_dump_cmd_create(mdev, &key);
	if (IS_ERR(cmd)) {
		err = PTR_ERR(cmd);
		goto free_page;
	}

	do {
		cmd_err = mlx5_rsc_dump_next(mdev, cmd, page, &size);
		/* Stop on a command error or if the dump would overflow @data. */
		if (cmd_err < 0 || size + offset > MAX_DUMP_SIZE) {
			err = cmd_err < 0 ? cmd_err : -EINVAL;
			goto destroy_cmd;
		}
		memcpy(data + offset, page_address(page), size);
		offset += size;
	} while (cmd_err > 0);
	*data_len = offset;

destroy_cmd:
	mlx5_rsc_dump_cmd_destroy(cmd);
free_page:
	__free_page(page);
	return err;
}
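
/*
 * Dump the raw device object behind a restrack resource and attach it to the
 * netlink message as RDMA_NLDEV_ATTR_RES_RAW.
 */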
static int fill_res_raw(struct sk_buff *msg, struct mlx5_ib_dev *dev,
			enum mlx5_sgmt_type type, u32 key)
{
	int len = 0;
	void *data;
	int err;

	data = kzalloc(MAX_DUMP_SIZE, GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	err = dump_rsc(dev->mdev, type, key, data, &len);
	if (err)
		goto out;

	err = nla_put(msg, RDMA_NLDEV_ATTR_RES_RAW, len, data);

out:
	kfree(data);
	return err;
}
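
/* Export ODP page fault/invalidation/prefetch counters for an ODP MR. */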
static int fill_stat_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct nlattr *table_attr;

	if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_STAT_HWCOUNTERS);
	if (!table_attr)
		goto err;

	if (rdma_nl_stat_hwcounter_entry(msg, "page_faults",
					 atomic64_read(&mr->odp_stats.faults)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(
		    msg, "page_invalidations",
		    atomic64_read(&mr->odp_stats.invalidations)))
		goto err_table;
	if (rdma_nl_stat_hwcounter_entry(msg, "page_prefetch",
					 atomic64_read(&mr->odp_stats.prefetch)))
		goto err_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}
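
/* Raw PRM dump of the MKey object backing an MR. */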
static int fill_res_mr_entry_raw(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);

	return fill_res_raw(msg, mr_to_mdev(mr), MLX5_SGMT_TYPE_PRM_QUERY_MKEY,
			    mlx5_mkey_to_idx(mr->mmkey.key));
}
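
/* Driver-specific MR details: report whether ODP is implicit or explicit. */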
static int fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct mlx5_ib_mr *mr = to_mmr(ibmr);
	struct nlattr *table_attr;

	if (!(mr->access_flags & IB_ACCESS_ON_DEMAND))
		return 0;

	table_attr = nla_nest_start(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	if (mr->is_odp_implicit) {
		if (rdma_nl_put_driver_string(msg, "odp", "implicit"))
			goto err;
	} else {
		if (rdma_nl_put_driver_string(msg, "odp", "explicit"))
			goto err;
	}

	nla_nest_end(msg, table_attr);
	return 0;

err:
	nla_nest_cancel(msg, table_attr);
	return -EMSGSIZE;
}
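
/* Raw PRM dump of the CQ context, keyed by the CQN. */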
static int fill_res_cq_entry_raw(struct sk_buff *msg, struct ib_cq *ibcq)
{
	struct mlx5_ib_dev *dev = to_mdev(ibcq->device);
	struct mlx5_ib_cq *cq = to_mcq(ibcq);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_CQ, cq->mcq.cqn);
}
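
/* Raw PRM dump of the QP context, keyed by the QP number. */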
static int fill_res_qp_entry_raw(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct mlx5_ib_dev *dev = to_mdev(ibqp->device);

	return fill_res_raw(msg, dev, MLX5_SGMT_TYPE_PRM_QUERY_QP,
			    ibqp->qp_num);
}
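
/*
 * restrack hooks handed to the RDMA core; they back the rdmatool resource
 * and stat dump interfaces for this device.
 */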
static const struct ib_device_ops restrack_ops = {
	.fill_res_cq_entry_raw = fill_res_cq_entry_raw,
	.fill_res_mr_entry = fill_res_mr_entry,
	.fill_res_mr_entry_raw = fill_res_mr_entry_raw,
	.fill_res_qp_entry_raw = fill_res_qp_entry_raw,
	.fill_stat_mr_entry = fill_stat_mr_entry,
};

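/* Register the restrack hooks on the ib_device during mlx5_ib init. */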
int mlx5_ib_restrack_init(struct mlx5_ib_dev *dev)
{
	ib_set_device_ops(&dev->ib_dev, &restrack_ops);
	return 0;
}