// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-cache.c - pblk's write cache
 */

#include "pblk.h"
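
/*
 * pblk_write_to_cache - buffer a user write bio in the ring write buffer
 *
 * Reserves nr_entries slots in pblk->rwb, copies the bio payload into them
 * one PBLK_EXPOSED_PAGE_SIZE entry at a time, and returns the NVM_IO_*
 * status obtained from pblk_rb_may_write_user(). The buffered entries are
 * written to the device asynchronously; pblk_write_should_kick() nudges
 * the writer on exit.
 */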
int pblk_write_to_cache(struct pblk *pblk, struct bio *bio, unsigned long flags)
{
	struct request_queue *q = pblk->dev->q;
	struct pblk_w_ctx w_ctx;
	sector_t lba = pblk_get_lba(bio);
	unsigned long start_time = jiffies;
	unsigned int bpos, pos;
	int nr_entries = pblk_get_secs(bio);
	int i, ret;

	generic_start_io_acct(q, REQ_OP_WRITE, bio_sectors(bio),
			      &pblk->disk->part0);

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
	switch (ret) {
	case NVM_IO_REQUEUE:
		/* No room in the ring buffer yet: yield and retry */
		io_schedule();
		goto retry;
	case NVM_IO_ERR:
		pblk_pipeline_stop(pblk);
		goto out;
	}
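
	/* Reservation succeeded: build the write context shared by every
	 * entry of this bio. The ppa is left empty until the entry is
	 * actually mapped to the device.
	 */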
	pblk_ppa_set_empty(&w_ctx.ppa);
	w_ctx.flags = flags;
	if (bio->bi_opf & REQ_PREFLUSH) {
		w_ctx.flags |= PBLK_FLUSH_ENTRY;
		pblk_write_kick(pblk);
	}

	/* A flush-only bio carries no payload to buffer */
	if (unlikely(!bio_has_data(bio)))
		goto out;

	for (i = 0; i < nr_entries; i++) {
		void *data = bio_data(bio);

		w_ctx.lba = lba + i;

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
		pblk_rb_write_entry_user(&pblk->rwb, data, w_ctx, pos);

		bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
	}
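
	/* All entries are buffered: account them as user writes and let the
	 * rate limiter know how much of the buffer they consumed.
	 */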
	atomic64_add(nr_entries, &pblk->user_wa);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(nr_entries, &pblk->inflight_writes);
	atomic_long_add(nr_entries, &pblk->req_writes);
#endif

	pblk_rl_inserted(&pblk->rl, nr_entries);

out:
	generic_end_io_acct(q, REQ_OP_WRITE, &pblk->disk->part0, start_time);
	pblk_write_should_kick(pblk);

	return ret;
}
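
/*
 * The NVM_IO_* status propagates to the caller (pblk's bio entry point,
 * not shown in this file), which completes or fails the original bio
 * based on it.
 */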

/*
 * On GC the incoming lbas are not necessarily sequential. Also, some of the
 * lbas might not be valid entries, which are marked as empty by the GC thread.
 */
int pblk_write_gc_to_cache(struct pblk *pblk, struct pblk_gc_rq *gc_rq)
{
	struct pblk_w_ctx w_ctx;
	unsigned int bpos, pos;
	void *data = gc_rq->data;
	int i, valid_entries;

	/* Update the write buffer head (mem) with the entries that we can
	 * write. The write in itself cannot fail, so there is no need to
	 * rollback from here on.
	 */
retry:
	if (!pblk_rb_may_write_gc(&pblk->rwb, gc_rq->secs_to_gc, &bpos)) {
		io_schedule();
		goto retry;
	}

	w_ctx.flags = PBLK_IOTYPE_GC;
	pblk_ppa_set_empty(&w_ctx.ppa);
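
	/* Skip lbas that the GC thread marked as empty (ADDR_EMPTY): they
	 * were invalidated after the GC request was formed, so only
	 * valid_entries of the nr_secs actually get buffered.
	 */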
	for (i = 0, valid_entries = 0; i < gc_rq->nr_secs; i++) {
		if (gc_rq->lba_list[i] == ADDR_EMPTY)
			continue;

		w_ctx.lba = gc_rq->lba_list[i];

		pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + valid_entries);
		pblk_rb_write_entry_gc(&pblk->rwb, data, w_ctx, gc_rq->line,
				       gc_rq->paddr_list[i], pos);

		data += PBLK_EXPOSED_PAGE_SIZE;
		valid_entries++;
	}
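
	/* The reservation above was made for secs_to_gc entries, so a
	 * mismatch with valid_entries means the GC request is internally
	 * inconsistent.
	 */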
	WARN_ONCE(gc_rq->secs_to_gc != valid_entries,
		  "pblk: inconsistent GC write\n");

	atomic64_add(valid_entries, &pblk->gc_wa);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(valid_entries, &pblk->inflight_writes);
	atomic_long_add(valid_entries, &pblk->recov_gc_writes);
#endif

	pblk_write_should_kick(pblk);

	return NVM_IO_OK;
}