// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
10 #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
14 void rtrs_clt_update_wc_stats(struct rtrs_clt_con
*con
)
16 struct rtrs_clt_path
*clt_path
= to_clt_path(con
->c
.path
);
17 struct rtrs_clt_stats
*stats
= clt_path
->stats
;
18 struct rtrs_clt_stats_pcpu
*s
;
21 cpu
= raw_smp_processor_id();
22 s
= get_cpu_ptr(stats
->pcpu_stats
);
23 if (con
->cpu
!= cpu
) {
26 /* Careful here, override s pointer */
27 s
= per_cpu_ptr(stats
->pcpu_stats
, con
->cpu
);
28 atomic_inc(&s
->cpu_migr
.from
);
30 put_cpu_ptr(stats
->pcpu_stats
);
33 void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats
*stats
)
35 this_cpu_inc(stats
->pcpu_stats
->rdma
.failover_cnt
);
38 int rtrs_clt_stats_migration_from_cnt_to_str(struct rtrs_clt_stats
*stats
, char *buf
)
40 struct rtrs_clt_stats_pcpu
*s
;
46 for_each_possible_cpu(cpu
) {
47 s
= per_cpu_ptr(stats
->pcpu_stats
, cpu
);
48 used
+= sysfs_emit_at(buf
, used
, "%d ",
49 atomic_read(&s
->cpu_migr
.from
));
52 used
+= sysfs_emit_at(buf
, used
, "\n");
57 int rtrs_clt_stats_migration_to_cnt_to_str(struct rtrs_clt_stats
*stats
, char *buf
)
59 struct rtrs_clt_stats_pcpu
*s
;
65 for_each_possible_cpu(cpu
) {
66 s
= per_cpu_ptr(stats
->pcpu_stats
, cpu
);
67 used
+= sysfs_emit_at(buf
, used
, "%d ", s
->cpu_migr
.to
);
70 used
+= sysfs_emit_at(buf
, used
, "\n");
75 int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats
*stats
, char *buf
)
77 return sysfs_emit(buf
, "%d %d\n", stats
->reconnects
.successful_cnt
,
78 stats
->reconnects
.fail_cnt
);
81 ssize_t
rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats
*stats
, char *page
)
83 struct rtrs_clt_stats_rdma sum
;
84 struct rtrs_clt_stats_rdma
*r
;
87 memset(&sum
, 0, sizeof(sum
));
89 for_each_possible_cpu(cpu
) {
90 r
= &per_cpu_ptr(stats
->pcpu_stats
, cpu
)->rdma
;
92 sum
.dir
[READ
].cnt
+= r
->dir
[READ
].cnt
;
93 sum
.dir
[READ
].size_total
+= r
->dir
[READ
].size_total
;
94 sum
.dir
[WRITE
].cnt
+= r
->dir
[WRITE
].cnt
;
95 sum
.dir
[WRITE
].size_total
+= r
->dir
[WRITE
].size_total
;
96 sum
.failover_cnt
+= r
->failover_cnt
;
99 return sysfs_emit(page
, "%llu %llu %llu %llu %u %llu\n",
100 sum
.dir
[READ
].cnt
, sum
.dir
[READ
].size_total
,
101 sum
.dir
[WRITE
].cnt
, sum
.dir
[WRITE
].size_total
,
102 atomic_read(&stats
->inflight
), sum
.failover_cnt
);
105 ssize_t
rtrs_clt_reset_all_help(struct rtrs_clt_stats
*s
, char *page
)
107 return sysfs_emit(page
, "echo 1 to reset all statistics\n");
110 int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats
*stats
, bool enable
)
112 struct rtrs_clt_stats_pcpu
*s
;
118 for_each_possible_cpu(cpu
) {
119 s
= per_cpu_ptr(stats
->pcpu_stats
, cpu
);
120 memset(&s
->rdma
, 0, sizeof(s
->rdma
));
126 int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats
*stats
, bool enable
)
128 struct rtrs_clt_stats_pcpu
*s
;
134 for_each_possible_cpu(cpu
) {
135 s
= per_cpu_ptr(stats
->pcpu_stats
, cpu
);
136 memset(&s
->cpu_migr
, 0, sizeof(s
->cpu_migr
));
142 int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats
*stats
, bool enable
)
147 memset(&stats
->reconnects
, 0, sizeof(stats
->reconnects
));
152 int rtrs_clt_reset_all_stats(struct rtrs_clt_stats
*s
, bool enable
)
155 rtrs_clt_reset_rdma_stats(s
, enable
);
156 rtrs_clt_reset_cpu_migr_stats(s
, enable
);
157 rtrs_clt_reset_reconnects_stat(s
, enable
);
158 atomic_set(&s
->inflight
, 0);
165 static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats
*stats
,
168 this_cpu_inc(stats
->pcpu_stats
->rdma
.dir
[d
].cnt
);
169 this_cpu_add(stats
->pcpu_stats
->rdma
.dir
[d
].size_total
, size
);
172 void rtrs_clt_update_all_stats(struct rtrs_clt_io_req
*req
, int dir
)
174 struct rtrs_clt_con
*con
= req
->con
;
175 struct rtrs_clt_path
*clt_path
= to_clt_path(con
->c
.path
);
176 struct rtrs_clt_stats
*stats
= clt_path
->stats
;
179 len
= req
->usr_len
+ req
->data_len
;
180 rtrs_clt_update_rdma_stats(stats
, len
, dir
);
181 if (req
->mp_policy
== MP_POLICY_MIN_INFLIGHT
)
182 atomic_inc(&stats
->inflight
);
185 int rtrs_clt_init_stats(struct rtrs_clt_stats
*stats
)
187 stats
->pcpu_stats
= alloc_percpu(typeof(*stats
->pcpu_stats
));
188 if (!stats
->pcpu_stats
)
192 * successful_cnt will be set to 0 after session
193 * is established for the first time
195 stats
->reconnects
.successful_cnt
= -1;