drivers/lightnvm/pblk-map.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-map.c - pblk's lba-ppa mapping strategy
 */

#include "pblk.h"
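
/*
 * Map one write unit (pblk->min_write_pgs sectors) onto the current data
 * line: assign each sector a physical address, mirror its lba into the
 * per-sector metadata and the line's emeta lba list, and pad any sectors
 * beyond valid_secs with ADDR_EMPTY.
 */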
static int pblk_map_page_data(struct pblk *pblk, unsigned int sentry,
			      struct ppa_addr *ppa_list,
			      unsigned long *lun_bitmap,
			      struct pblk_sec_meta *meta_list,
			      unsigned int valid_secs)
{
	struct pblk_line *line = pblk_line_get_data(pblk);
	struct pblk_emeta *emeta;
	struct pblk_w_ctx *w_ctx;
	__le64 *lba_list;
	u64 paddr;
	int nr_secs = pblk->min_write_pgs;
	int i;

	if (pblk_line_is_full(line)) {
		struct pblk_line *prev_line = line;

		/* If we cannot allocate a new line, make sure to store metadata
		 * on current line and then fail
		 */
		line = pblk_line_replace_data(pblk);
		pblk_line_close_meta(pblk, prev_line);

		if (!line)
			return -EINTR;
	}

	emeta = line->emeta;
	lba_list = emeta_to_lbas(pblk, emeta->buf);
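
	/* reserve nr_secs sectors on the line; paddr is the first of them */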
	paddr = pblk_alloc_page(pblk, line, nr_secs);

	for (i = 0; i < nr_secs; i++, paddr++) {
		__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);

		/* ppa to be sent to the device */
		ppa_list[i] = addr_to_gen_ppa(pblk, paddr, line->id);

		/* Write context for target bio completion on write buffer. Note
		 * that the write buffer is protected by the sync backpointer,
		 * and a single writer thread has access to each specific entry
		 * at a time. Thus, it is safe to modify the context for the
		 * entry we are setting up for submission without taking any
		 * lock or memory barrier.
		 */
		if (i < valid_secs) {
			kref_get(&line->ref);
			w_ctx = pblk_rb_w_ctx(&pblk->rwb, sentry + i);
			w_ctx->ppa = ppa_list[i];
			meta_list[i].lba = cpu_to_le64(w_ctx->lba);
			lba_list[paddr] = cpu_to_le64(w_ctx->lba);
			if (lba_list[paddr] != addr_empty)
				line->nr_valid_lbas++;
			else
				atomic64_inc(&pblk->pad_wa);
		} else {
			lba_list[paddr] = meta_list[i].lba = addr_empty;
			__pblk_map_invalidate(pblk, line, paddr);
		}
	}
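
	/* take the write semaphore on the LUN this write unit targets;
	 * released again once the write completes */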
	pblk_down_rq(pblk, ppa_list[0], lun_bitmap);
	return 0;
}
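
/*
 * Map a write request onto the current data line, one write unit at a
 * time, starting at sector offset 'off' within the request. If a write
 * unit cannot be mapped, the request is torn down and the write pipeline
 * is stopped.
 */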
void pblk_map_rq(struct pblk *pblk, struct nvm_rq *rqd, unsigned int sentry,
		 unsigned long *lun_bitmap, unsigned int valid_secs,
		 unsigned int off)
{
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i;

	for (i = off; i < rqd->nr_ppas; i += min) {
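		/* the last write unit may be only partially valid; the
		 * remainder is padded inside pblk_map_page_data() */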
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		if (pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
					lun_bitmap, &meta_list[i], map_secs)) {
			bio_put(rqd->bio);
			pblk_free_rqd(pblk, rqd, PBLK_WRITE);
			pblk_pipeline_stop(pblk);
		}
	}
}

/*
 * Like pblk_map_rq(), but also picks a block on the erase line that still
 * needs erasing and returns it in *erase_ppa. Only if erase_ppa is set
 * does the caller acquire the erase semaphore.
 */
void pblk_map_erase_rq(struct pblk *pblk, struct nvm_rq *rqd,
		       unsigned int sentry, unsigned long *lun_bitmap,
		       unsigned int valid_secs, struct ppa_addr *erase_ppa)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_sec_meta *meta_list = rqd->meta_list;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	struct pblk_line *e_line, *d_line;
	unsigned int map_secs;
	int min = pblk->min_write_pgs;
	int i, erase_lun;

	for (i = 0; i < rqd->nr_ppas; i += min) {
		map_secs = (i + min > valid_secs) ? (valid_secs % min) : min;
		if (pblk_map_page_data(pblk, sentry + i, &ppa_list[i],
					lun_bitmap, &meta_list[i], map_secs)) {
			bio_put(rqd->bio);
			pblk_free_rqd(pblk, rqd, PBLK_WRITE);
			pblk_pipeline_stop(pblk);
		}
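
		/* position of this write unit's LUN in the device geometry;
		 * used as the bit index into the line's erase bitmap */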
		erase_lun = pblk_ppa_to_pos(geo, ppa_list[i]);

		/* line can change after page map. We might also be writing the
		 * last line.
		 */
		e_line = pblk_line_get_erase(pblk);
		if (!e_line)
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);

		spin_lock(&e_line->lock);
		if (!test_bit(erase_lun, e_line->erase_bitmap)) {
			set_bit(erase_lun, e_line->erase_bitmap);
			atomic_dec(&e_line->left_eblks);

			*erase_ppa = ppa_list[i];
			erase_ppa->a.blk = e_line->id;

			spin_unlock(&e_line->lock);

			/* Avoid evaluating e_line->left_eblks */
			return pblk_map_rq(pblk, rqd, sentry, lun_bitmap,
							valid_secs, i + min);
		}
		spin_unlock(&e_line->lock);
	}
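
	/* every LUN touched by this request already had its erase-line
	 * block marked; fall through to blocks that are bad on the data
	 * line but may still need erasing on the erase line */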
	d_line = pblk_line_get_data(pblk);

	/* line can change after page map. We might also be writing the
	 * last line.
	 */
	e_line = pblk_line_get_erase(pblk);
	if (!e_line)
		return;

	/* Erase blocks that are bad in this line but might not be in next */
	if (unlikely(pblk_ppa_empty(*erase_ppa)) &&
			bitmap_weight(d_line->blk_bitmap, lm->blk_per_line)) {
		int bit = -1;

retry:
		bit = find_next_bit(d_line->blk_bitmap,
						lm->blk_per_line, bit + 1);
		if (bit >= lm->blk_per_line)
			return;

		spin_lock(&e_line->lock);
		if (test_bit(bit, e_line->erase_bitmap)) {
			spin_unlock(&e_line->lock);
			goto retry;
		}
		spin_unlock(&e_line->lock);

		set_bit(bit, e_line->erase_bitmap);
		atomic_dec(&e_line->left_eblks);
		*erase_ppa = pblk->luns[bit].bppa; /* set ch and lun */
		erase_ppa->a.blk = e_line->id;
	}
}