/*
 * Copyright (c) 2016, Mellanox Technologies. All rights reserved.
 * Copyright (c) 2017-2018, Broadcom Limited. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#ifndef NET_DIM_H
#define NET_DIM_H

#include <linux/module.h>
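
/*
 * A single moderation setting: how many microseconds to wait and how many
 * packets to accumulate before raising a completion event, plus the CQ
 * period mode the setting belongs to.
 */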
struct net_dim_cq_moder {
        u16 usec;
        u16 pkts;
        u8 cq_period_mode;
};
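
/*
 * A snapshot of the queue's packet, byte and event counters at a given
 * time; two samples bound one measurement window.
 */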
struct net_dim_sample {
        ktime_t time;
        u32     pkt_ctr;
        u32     byte_ctr;
        u16     event_ctr;
};

struct net_dim_stats {
        int ppms; /* packets per msec */
        int bpms; /* bytes per msec */
        int epms; /* events per msec */
};
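
/*
 * Per-queue dynamic interrupt moderation context: the measurement state
 * machine (state), the tuner state (tune_state, steps_right, steps_left,
 * tired), the currently applied profile (profile_ix, mode), the rates of
 * the previous window (prev_stats), the sample opening the current window
 * (start_sample), and the work item used to apply a newly selected profile
 * outside of interrupt context.
 */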
struct net_dim { /* Adaptive Moderation */
        u8 state;
        struct net_dim_stats prev_stats;
        struct net_dim_sample start_sample;
        struct work_struct work;
        u8 profile_ix;
        u8 mode;
        u8 tune_state;
        u8 steps_right;
        u8 steps_left;
        u8 tired;
};

enum {
        NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE = 0x0,
        NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE = 0x1,
        NET_DIM_CQ_PERIOD_NUM_MODES
};

/* Adaptive moderation logic */
enum {
        NET_DIM_START_MEASURE,
        NET_DIM_MEASURE_IN_PROGRESS,
        NET_DIM_APPLY_NEW_PROFILE,
};

enum {
        NET_DIM_PARKING_ON_TOP,
        NET_DIM_PARKING_TIRED,
        NET_DIM_GOING_RIGHT,
        NET_DIM_GOING_LEFT,
};

enum {
        NET_DIM_STATS_WORSE,
        NET_DIM_STATS_SAME,
        NET_DIM_STATS_BETTER,
};

enum {
        NET_DIM_STEPPED,
        NET_DIM_TOO_TIRED,
        NET_DIM_ON_EDGE,
};

#define NET_DIM_PARAMS_NUM_PROFILES 5
/* Adaptive moderation profiles */
#define NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE 256
#define NET_DIM_DEF_PROFILE_CQE 1
#define NET_DIM_DEF_PROFILE_EQE 1

/* All profile sizes must be NET_DIM_PARAMS_NUM_PROFILES */
#define NET_DIM_EQE_PROFILES { \
        {1,   NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
        {8,   NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
        {64,  NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
        {128, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
        {256, NET_DIM_DEFAULT_RX_CQ_MODERATION_PKTS_FROM_EQE}, \
}

#define NET_DIM_CQE_PROFILES { \
        {2,  256}, \
        {8,  128}, \
        {16, 64},  \
        {32, 64},  \
        {64, 64}   \
}

static const struct net_dim_cq_moder
profile[NET_DIM_CQ_PERIOD_NUM_MODES][NET_DIM_PARAMS_NUM_PROFILES] = {
        NET_DIM_EQE_PROFILES,
        NET_DIM_CQE_PROFILES,
};
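
/* Return the moderation parameters of profile @ix for @cq_period_mode. */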
static inline struct net_dim_cq_moder net_dim_get_profile(u8 cq_period_mode,
                                                          int ix)
{
        struct net_dim_cq_moder cq_moder;

        cq_moder = profile[cq_period_mode][ix];
        cq_moder.cq_period_mode = cq_period_mode;
        return cq_moder;
}

static inline struct net_dim_cq_moder net_dim_get_def_profile(u8 rx_cq_period_mode)
{
        int default_profile_ix;

        if (rx_cq_period_mode == NET_DIM_CQ_PERIOD_MODE_START_FROM_CQE)
                default_profile_ix = NET_DIM_DEF_PROFILE_CQE;
        else /* NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE */
                default_profile_ix = NET_DIM_DEF_PROFILE_EQE;

        return net_dim_get_profile(rx_cq_period_mode, default_profile_ix);
}
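
/*
 * True when the tuner appears to sit on a local optimum: it is parked, or
 * it has taken exactly one step in the current direction after more than
 * one step in the opposite direction.
 */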
static inline bool net_dim_on_top(struct net_dim *dim)
{
        switch (dim->tune_state) {
        case NET_DIM_PARKING_ON_TOP:
        case NET_DIM_PARKING_TIRED:
                return true;
        case NET_DIM_GOING_RIGHT:
                return (dim->steps_left > 1) && (dim->steps_right == 1);
        default: /* NET_DIM_GOING_LEFT */
                return (dim->steps_right > 1) && (dim->steps_left == 1);
        }
}

static inline void net_dim_turn(struct net_dim *dim)
{
        switch (dim->tune_state) {
        case NET_DIM_PARKING_ON_TOP:
        case NET_DIM_PARKING_TIRED:
                break;
        case NET_DIM_GOING_RIGHT:
                dim->tune_state = NET_DIM_GOING_LEFT;
                dim->steps_left = 0;
                break;
        case NET_DIM_GOING_LEFT:
                dim->tune_state = NET_DIM_GOING_RIGHT;
                dim->steps_right = 0;
                break;
        }
}
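
/*
 * Move one profile in the current tuning direction. Returns NET_DIM_ON_EDGE
 * at the first/last profile, NET_DIM_TOO_TIRED once too many steps have
 * been taken without parking, and NET_DIM_STEPPED otherwise.
 */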
static inline int net_dim_step(struct net_dim *dim)
{
        if (dim->tired == (NET_DIM_PARAMS_NUM_PROFILES * 2))
                return NET_DIM_TOO_TIRED;

        switch (dim->tune_state) {
        case NET_DIM_PARKING_ON_TOP:
        case NET_DIM_PARKING_TIRED:
                break;
        case NET_DIM_GOING_RIGHT:
                if (dim->profile_ix == (NET_DIM_PARAMS_NUM_PROFILES - 1))
                        return NET_DIM_ON_EDGE;
                dim->profile_ix++;
                dim->steps_right++;
                break;
        case NET_DIM_GOING_LEFT:
                if (dim->profile_ix == 0)
                        return NET_DIM_ON_EDGE;
                dim->profile_ix--;
                dim->steps_left++;
                break;
        }

        dim->tired++;
        return NET_DIM_STEPPED;
}
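
/*
 * Parking: "on top" holds the tuner on a presumed optimum until the traffic
 * pattern changes, "tired" holds it until the tiredness counter decays.
 * Exiting parking resumes the search towards lower profile indices when
 * possible, otherwise towards higher ones.
 */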
static inline void net_dim_park_on_top(struct net_dim *dim)
{
        dim->steps_right = 0;
        dim->steps_left  = 0;
        dim->tired       = 0;
        dim->tune_state  = NET_DIM_PARKING_ON_TOP;
}

static inline void net_dim_park_tired(struct net_dim *dim)
{
        dim->steps_right = 0;
        dim->steps_left  = 0;
        dim->tune_state  = NET_DIM_PARKING_TIRED;
}

static inline void net_dim_exit_parking(struct net_dim *dim)
{
        dim->tune_state = dim->profile_ix ? NET_DIM_GOING_LEFT :
                                            NET_DIM_GOING_RIGHT;
        net_dim_step(dim);
}

#define IS_SIGNIFICANT_DIFF(val, ref) \
        (((100 * abs((val) - (ref))) / (ref)) > 10) /* more than 10% difference */
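
/*
 * Compare two rate measurements: bytes/msec dominates, then packets/msec,
 * then events/msec. A change of more than 10% is significant; for events,
 * fewer per msec at the same traffic level counts as better.
 */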
static inline int net_dim_stats_compare(struct net_dim_stats *curr,
                                        struct net_dim_stats *prev)
{
        if (!prev->bpms)
                return curr->bpms ? NET_DIM_STATS_BETTER :
                                    NET_DIM_STATS_SAME;

        if (IS_SIGNIFICANT_DIFF(curr->bpms, prev->bpms))
                return (curr->bpms > prev->bpms) ? NET_DIM_STATS_BETTER :
                                                   NET_DIM_STATS_WORSE;

        if (!prev->ppms)
                return curr->ppms ? NET_DIM_STATS_BETTER :
                                    NET_DIM_STATS_SAME;

        if (IS_SIGNIFICANT_DIFF(curr->ppms, prev->ppms))
                return (curr->ppms > prev->ppms) ? NET_DIM_STATS_BETTER :
                                                   NET_DIM_STATS_WORSE;

        if (!prev->epms)
                return NET_DIM_STATS_SAME;

        if (IS_SIGNIFICANT_DIFF(curr->epms, prev->epms))
                return (curr->epms < prev->epms) ? NET_DIM_STATS_BETTER :
                                                   NET_DIM_STATS_WORSE;

        return NET_DIM_STATS_SAME;
}
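
/*
 * Core decision, run once per measurement window: exit parking when the
 * traffic changes (or tiredness wears off), reverse direction unless the
 * last step improved things, park when on top or on an edge, otherwise
 * take another step. Returns true if the profile index changed and a new
 * profile needs to be applied.
 */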
static inline bool net_dim_decision(struct net_dim_stats *curr_stats,
                                    struct net_dim *dim)
{
        int prev_state = dim->tune_state;
        int prev_ix = dim->profile_ix;
        int stats_res;
        int step_res;

        switch (dim->tune_state) {
        case NET_DIM_PARKING_ON_TOP:
                stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
                if (stats_res != NET_DIM_STATS_SAME)
                        net_dim_exit_parking(dim);
                break;

        case NET_DIM_PARKING_TIRED:
                dim->tired--;
                if (!dim->tired)
                        net_dim_exit_parking(dim);
                break;

        case NET_DIM_GOING_RIGHT:
        case NET_DIM_GOING_LEFT:
                stats_res = net_dim_stats_compare(curr_stats, &dim->prev_stats);
                if (stats_res != NET_DIM_STATS_BETTER)
                        net_dim_turn(dim);

                if (net_dim_on_top(dim)) {
                        net_dim_park_on_top(dim);
                        break;
                }

                step_res = net_dim_step(dim);
                switch (step_res) {
                case NET_DIM_ON_EDGE:
                        net_dim_park_on_top(dim);
                        break;
                case NET_DIM_TOO_TIRED:
                        net_dim_park_tired(dim);
                        break;
                }

                break;
        }

        if ((prev_state != NET_DIM_PARKING_ON_TOP) ||
            (dim->tune_state != NET_DIM_PARKING_ON_TOP))
                dim->prev_stats = *curr_stats;

        return dim->profile_ix != prev_ix;
}
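
/* Fill @s with the current time and the queue's current counter values. */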
static inline void net_dim_sample(u16 event_ctr,
                                  u64 packets,
                                  u64 bytes,
                                  struct net_dim_sample *s)
{
        s->time      = ktime_get();
        s->pkt_ctr   = packets;
        s->byte_ctr  = bytes;
        s->event_ctr = event_ctr;
}

#define NET_DIM_NEVENTS 64
#define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
#define BIT_GAP(bits, end, start) ((((end) - (start)) + BIT_ULL(bits)) & (BIT_ULL(bits) - 1))
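
/*
 * Turn the packet and byte counter deltas between @start and @end (allowing
 * for wrap-around) into per-millisecond rates; the event rate assumes
 * NET_DIM_NEVENTS events elapsed over the same period.
 */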
static inline void net_dim_calc_stats(struct net_dim_sample *start,
                                      struct net_dim_sample *end,
                                      struct net_dim_stats *curr_stats)
{
        /* u32 holds up to 71 minutes, should be enough */
        u32 delta_us = ktime_us_delta(end->time, start->time);
        u32 npkts = BIT_GAP(BITS_PER_TYPE(u32), end->pkt_ctr, start->pkt_ctr);
        u32 nbytes = BIT_GAP(BITS_PER_TYPE(u32), end->byte_ctr,
                             start->byte_ctr);

        if (!delta_us)
                return;

        curr_stats->ppms = DIV_ROUND_UP(npkts * USEC_PER_MSEC, delta_us);
        curr_stats->bpms = DIV_ROUND_UP(nbytes * USEC_PER_MSEC, delta_us);
        curr_stats->epms = DIV_ROUND_UP(NET_DIM_NEVENTS * USEC_PER_MSEC,
                                        delta_us);
}
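
/*
 * Main entry point, called by the driver with a fresh sample on every
 * completion event. Once NET_DIM_NEVENTS events have passed since the
 * start sample, the rates are computed and net_dim_decision() may select a
 * new profile; in that case dim->work is scheduled so the driver can apply
 * it outside of the interrupt path.
 */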
static inline void net_dim(struct net_dim *dim,
                           struct net_dim_sample end_sample)
{
        struct net_dim_stats curr_stats;
        u16 nevents;

        switch (dim->state) {
        case NET_DIM_MEASURE_IN_PROGRESS:
                nevents = BIT_GAP(BITS_PER_TYPE(u16),
                                  end_sample.event_ctr,
                                  dim->start_sample.event_ctr);
                if (nevents < NET_DIM_NEVENTS)
                        break;
                net_dim_calc_stats(&dim->start_sample, &end_sample,
                                   &curr_stats);
                if (net_dim_decision(&curr_stats, dim)) {
                        dim->state = NET_DIM_APPLY_NEW_PROFILE;
                        schedule_work(&dim->work);
                        break;
                }
                /* fall through */
        case NET_DIM_START_MEASURE:
                dim->state = NET_DIM_MEASURE_IN_PROGRESS;
                break;
        case NET_DIM_APPLY_NEW_PROFILE:
                break;
        }
}
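
/*
 * Example usage (illustrative sketch only; struct my_ring, my_hw_set_coal()
 * and my_rx_poll() are hypothetical driver pieces, not defined here -- only
 * the net_dim_*() calls and fields come from this file). A driver typically
 * feeds net_dim() a fresh sample at the end of each NAPI poll and applies
 * the newly selected profile from the scheduled work item:
 *
 *      static void my_dim_work(struct work_struct *work)
 *      {
 *              struct net_dim *dim = container_of(work, struct net_dim, work);
 *              struct my_ring *ring = container_of(dim, struct my_ring, dim);
 *              struct net_dim_cq_moder cur =
 *                      net_dim_get_profile(dim->mode, dim->profile_ix);
 *
 *              // program cur.usec / cur.pkts into the NIC
 *              my_hw_set_coal(ring, cur.usec, cur.pkts);
 *              dim->state = NET_DIM_START_MEASURE;
 *      }
 *
 *      static int my_napi_poll(struct napi_struct *napi, int budget)
 *      {
 *              struct my_ring *ring = container_of(napi, struct my_ring, napi);
 *              int work_done = my_rx_poll(ring, budget);
 *              struct net_dim_sample s;
 *
 *              net_dim_sample(ring->event_ctr, ring->rx_packets,
 *                             ring->rx_bytes, &s);
 *              net_dim(&ring->dim, s);
 *              return work_done;
 *      }
 *
 * Setup (e.g. on ring open) would do something like:
 *      INIT_WORK(&ring->dim.work, my_dim_work);
 *      ring->dim.mode = NET_DIM_CQ_PERIOD_MODE_START_FROM_EQE;
 */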

#endif /* NET_DIM_H */