drivers/lightnvm/pblk-rl.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rl.c - pblk's rate limiter for user I/O
 */

#include "pblk.h"
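
/*
 * Overview: the rate limiter splits the write buffer budget (rb_budget)
 * between user I/O (rb_user_max) and garbage collection (rb_gc_max)
 * according to how many free user blocks remain. As free space shrinks,
 * the user share shrinks and the GC share grows, so that the garbage
 * collector can reclaim lines before the device runs out of room. All
 * counters are atomics updated from the write and GC paths, and
 * __pblk_rl_update_rates() recomputes the split whenever the free-block
 * accounting changes.
 */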

static void pblk_rl_kick_u_timer(struct pblk_rl *rl)
{
	mod_timer(&rl->u_timer, jiffies + msecs_to_jiffies(5000));
}

int pblk_rl_is_limit(struct pblk_rl *rl)
{
	int rb_space;

	rb_space = atomic_read(&rl->rb_space);

	return (rb_space == 0);
}
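
/*
 * Admission control for user writes. The return codes follow the
 * NVM_IO_* convention: NVM_IO_ERR if a hard space limit is active
 * (rb_space >= 0; it starts at -1, i.e. disabled, in pblk_rl_init())
 * and the request would overrun it, NVM_IO_REQUEUE if the user share of
 * the buffer is currently exhausted, NVM_IO_OK otherwise.
 *
 * A caller is expected to follow roughly this pattern (a sketch only;
 * the real call sites live elsewhere in pblk and differ in detail):
 *
 *	if (pblk_rl_user_may_insert(rl, nr_entries) != NVM_IO_OK)
 *		return;				// back off or requeue
 *	pblk_rl_user_in(rl, nr_entries);	// account the entries
 */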

int pblk_rl_user_may_insert(struct pblk_rl *rl, int nr_entries)
{
	int rb_user_cnt = atomic_read(&rl->rb_user_cnt);
	int rb_space = atomic_read(&rl->rb_space);

	if (unlikely(rb_space >= 0) && (rb_space - nr_entries < 0))
		return NVM_IO_ERR;

	if (rb_user_cnt >= rl->rb_user_max)
		return NVM_IO_REQUEUE;

	return NVM_IO_OK;
}

void pblk_rl_inserted(struct pblk_rl *rl, int nr_entries)
{
	int rb_space = atomic_read(&rl->rb_space);

	if (unlikely(rb_space >= 0))
		atomic_sub(nr_entries, &rl->rb_space);
}

int pblk_rl_gc_may_insert(struct pblk_rl *rl, int nr_entries)
{
	int rb_gc_cnt = atomic_read(&rl->rb_gc_cnt);
	int rb_user_active;

	/* If there is no user I/O let GC take over space on the write buffer */
	rb_user_active = READ_ONCE(rl->rb_user_active);
	return (!(rb_gc_cnt >= rl->rb_gc_max && rb_user_active));
}

void pblk_rl_user_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_user_cnt);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 1);
	pblk_rl_kick_u_timer(rl);
}
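
/*
 * Note on rb_user_active: it stays set for as long as user writes keep
 * arriving, because every pblk_rl_user_in() call re-arms the 5 second
 * u_timer. Once user I/O has been quiet for 5 seconds, the timer
 * callback (pblk_rl_u_timer() below) clears the flag, which allows
 * pblk_rl_gc_may_insert() to admit GC writes beyond rb_gc_max.
 */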

void pblk_rl_werr_line_in(struct pblk_rl *rl)
{
	atomic_inc(&rl->werr_lines);
}

void pblk_rl_werr_line_out(struct pblk_rl *rl)
{
	atomic_dec(&rl->werr_lines);
}

void pblk_rl_gc_in(struct pblk_rl *rl, int nr_entries)
{
	atomic_add(nr_entries, &rl->rb_gc_cnt);
}

void pblk_rl_out(struct pblk_rl *rl, int nr_user, int nr_gc)
{
	atomic_sub(nr_user, &rl->rb_user_cnt);
	atomic_sub(nr_gc, &rl->rb_gc_cnt);
}

unsigned long pblk_rl_nr_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_blocks);
}

unsigned long pblk_rl_nr_user_free_blks(struct pblk_rl *rl)
{
	return atomic_read(&rl->free_user_blocks);
}
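
/*
 * Rate recalculation policy, as implemented below:
 *
 *  - free_blocks >= high: the whole budget goes to user I/O
 *    (PBLK_RL_OFF), unless lines with write errors are waiting to be
 *    recovered, in which case a small GC share is reserved
 *    (PBLK_RL_WERR).
 *  - free_blocks < high: the user share shrinks proportionally to the
 *    remaining free blocks and GC gets the rest (PBLK_RL_LOW).
 *  - free_blocks <= rsv_blocks: user I/O is cut off and GC owns the
 *    whole budget.
 *
 * Any state other than PBLK_RL_OFF starts the garbage collector.
 */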

static void __pblk_rl_update_rates(struct pblk_rl *rl,
				   unsigned long free_blocks)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	int max = rl->rb_budget;
	int werr_gc_needed = atomic_read(&rl->werr_lines);

	if (free_blocks >= rl->high) {
		if (werr_gc_needed) {
			/* Allocate a small budget for recovering
			 * lines with write errors
			 */
			rl->rb_gc_max = 1 << rl->rb_windows_pw;
			rl->rb_user_max = max - rl->rb_gc_max;
			rl->rb_state = PBLK_RL_WERR;
		} else {
			rl->rb_user_max = max;
			rl->rb_gc_max = 0;
			rl->rb_state = PBLK_RL_OFF;
		}
	} else if (free_blocks < rl->high) {
		int shift = rl->high_pw - rl->rb_windows_pw;
		int user_windows = free_blocks >> shift;
		int user_max = user_windows << ilog2(NVM_MAX_VLBA);

		rl->rb_user_max = user_max;
		rl->rb_gc_max = max - user_max;

		if (free_blocks <= rl->rsv_blocks) {
			rl->rb_user_max = 0;
			rl->rb_gc_max = max;
		}

		/* In the worst case, we will need to GC lines in the low list
		 * (high valid sector count). If there are lines to GC on high
		 * or mid lists, these will be prioritized
		 */
		rl->rb_state = PBLK_RL_LOW;
	}

	if (rl->rb_state != PBLK_RL_OFF)
		pblk_gc_should_start(pblk);
	else
		pblk_gc_should_stop(pblk);
}
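
/*
 * Worked example for the proportional branch above (illustrative
 * numbers, assuming NVM_MAX_VLBA == 64): with rb_budget = 4096 entries,
 * pblk_rl_init() derives rb_windows_pw = 6 (64 windows). If the high
 * watermark is 1024 free blocks (high_pw = 10), then shift = 10 - 6 = 4
 * and, at free_blocks = 512:
 *
 *	user_windows = 512 >> 4 = 32
 *	rb_user_max  = 32 << ilog2(64) = 2048	(half the budget)
 *	rb_gc_max    = 4096 - 2048 = 2048
 */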

void pblk_rl_update_rates(struct pblk_rl *rl)
{
	__pblk_rl_update_rates(rl, pblk_rl_nr_user_free_blks(rl));
}
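
/*
 * The two free-block counters are related but not identical: free_blocks
 * tracks every free block, while free_user_blocks is the value that
 * actually drives the rate calculation. pblk_rl_free_lines_inc() bumps
 * both when a line's blocks are freed; pblk_rl_free_lines_dec() always
 * charges free_blocks but charges free_user_blocks only when the caller
 * marks the line as "used", so other allocations leave the user budget
 * untouched.
 */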

void pblk_rl_free_lines_inc(struct pblk_rl *rl, struct pblk_line *line)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_add(blk_in_line, &rl->free_blocks);
	free_blocks = atomic_add_return(blk_in_line, &rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}

void pblk_rl_free_lines_dec(struct pblk_rl *rl, struct pblk_line *line,
			    bool used)
{
	int blk_in_line = atomic_read(&line->blk_in_line);
	int free_blocks;

	atomic_sub(blk_in_line, &rl->free_blocks);

	if (used)
		free_blocks = atomic_sub_return(blk_in_line,
						&rl->free_user_blocks);
	else
		free_blocks = atomic_read(&rl->free_user_blocks);

	__pblk_rl_update_rates(rl, free_blocks);
}
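
/*
 * Both helpers above feed the value returned by the atomic update (or a
 * fresh read, in the not-"used" case) straight into
 * __pblk_rl_update_rates(), so the recalculation sees the counter value
 * produced by this particular call rather than re-reading it later.
 */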

int pblk_rl_high_thrs(struct pblk_rl *rl)
{
	return rl->high;
}

int pblk_rl_max_io(struct pblk_rl *rl)
{
	return rl->rb_max_io;
}

static void pblk_rl_u_timer(struct timer_list *t)
{
	struct pblk_rl *rl = from_timer(rl, t, u_timer);

	/* Release user I/O state. Protect from GC */
	smp_store_release(&rl->rb_user_active, 0);
}

void pblk_rl_free(struct pblk_rl *rl)
{
	del_timer(&rl->u_timer);
}

void pblk_rl_init(struct pblk_rl *rl, int budget, int threshold)
{
	struct pblk *pblk = container_of(rl, struct pblk, rl);
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	int sec_meta, blk_meta;
	unsigned int rb_windows;

	/* Consider sectors used for metadata */
	sec_meta = (lm->smeta_sec + lm->emeta_sec[0]) * l_mg->nr_free_lines;
	blk_meta = DIV_ROUND_UP(sec_meta, geo->clba);

	rl->high = pblk->op_blks - blk_meta - lm->blk_per_line;
	rl->high_pw = get_count_order(rl->high);

	rl->rsv_blocks = pblk_get_min_chks(pblk);

	/* This will always be a power-of-2 */
	rb_windows = budget / NVM_MAX_VLBA;
	rl->rb_windows_pw = get_count_order(rb_windows);
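
	/*
	 * Example of the window sizing (illustrative, assuming
	 * NVM_MAX_VLBA == 64): a budget of 4096 ring buffer entries
	 * yields rb_windows = 64 and rb_windows_pw = 6, i.e. the budget
	 * is carved into 64 windows of one maximum-size vector command
	 * each. __pblk_rl_update_rates() hands out the user share in
	 * units of these windows.
	 */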

	/* To start with, the whole buffer is available to user I/O writers */
	rl->rb_budget = budget;
	rl->rb_user_max = budget;
	rl->rb_gc_max = 0;
	rl->rb_state = PBLK_RL_HIGH;

	/* Maximize I/O size and ensure that the back threshold is respected */
	if (threshold)
		rl->rb_max_io = budget - pblk->min_write_pgs_data - threshold;
	else
		rl->rb_max_io = budget - pblk->min_write_pgs_data - 1;
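
	/*
	 * rb_max_io caps how much of the buffer a single user I/O may
	 * claim: the full budget minus the minimum data write size and
	 * minus the back threshold (or minus one entry when no threshold
	 * is configured).
	 */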

	atomic_set(&rl->rb_user_cnt, 0);
	atomic_set(&rl->rb_gc_cnt, 0);
	atomic_set(&rl->rb_space, -1);
	atomic_set(&rl->werr_lines, 0);

	timer_setup(&rl->u_timer, pblk_rl_u_timer, 0);

	rl->rb_user_active = 0;
	rl->rb_gc_active = 0;
}