// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */

#include <linux/circ_buf.h>

#include "pblk.h"
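
/*
 * Note: pblk_rb_lock is file-scoped, so it serializes page-set allocation
 * and freeing across all write buffer instances, not just one. This is a
 * reading of how the rwsem is used below, not documented intent.
 */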
static DECLARE_RWSEM(pblk_rb_lock);

static void pblk_rb_data_free(struct pblk_rb *rb)
{
	struct pblk_rb_pages *p, *t;

	down_write(&pblk_rb_lock);
	list_for_each_entry_safe(p, t, &rb->pages, list) {
		free_pages((unsigned long)page_address(p->pages), p->order);
		list_del(&p->list);
		kfree(p);
	}
	up_write(&pblk_rb_lock);
}

void pblk_rb_free(struct pblk_rb *rb)
{
	pblk_rb_data_free(rb);
	vfree(rb->entries);
}

/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
static unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
{
	/* Alloc a write buffer that can at least fit 128 entries */
	return (1 << max(get_count_order(nr_entries), 7));
}
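
/*
 * For example: get_count_order() rounds up to the next power of two, so
 * nr_entries = 100 gives order 7 and a 1 << 7 = 128 entry buffer, while
 * nr_entries = 200 gives order 8, i.e. 256 entries. The max() against 7
 * enforces the 128-entry floor noted above.
 */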

/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/core-api/circular-buffers.rst)
 */
int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
		 unsigned int seg_size)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entries;
	unsigned int init_entry = 0;
	unsigned int max_order = MAX_ORDER - 1;
	unsigned int power_size, power_seg_sz;
	unsigned int alloc_order, order, iter;
	unsigned int nr_entries;

	nr_entries = pblk_rb_calculate_size(size);
	entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(size);
	power_seg_sz = get_count_order(seg_size);

	down_write(&pblk_rb_lock);
	rb->entries = entries;
	rb->seg_size = (1 << power_seg_sz);
	rb->nr_entries = (1 << power_size);
	rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
	rb->back_thres = threshold;
	rb->flush_point = EMPTY_ENTRY;

	spin_lock_init(&rb->w_lock);
	spin_lock_init(&rb->s_lock);

	INIT_LIST_HEAD(&rb->pages);

	alloc_order = power_size;
	if (alloc_order >= max_order) {
		order = max_order;
		iter = (1 << (alloc_order - max_order));
	} else {
		order = alloc_order;
		iter = 1;
	}

	do {
		struct pblk_rb_entry *entry;
		struct pblk_rb_pages *page_set;
		void *kaddr;
		unsigned long set_size;
		int i;

		page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
		if (!page_set) {
			up_write(&pblk_rb_lock);
			vfree(entries);
			return -ENOMEM;
		}

		page_set->order = order;
		page_set->pages = alloc_pages(GFP_KERNEL, order);
		if (!page_set->pages) {
			kfree(page_set);
			pblk_rb_data_free(rb);
			up_write(&pblk_rb_lock);
			vfree(entries);
			return -ENOMEM;
		}
		kaddr = page_address(page_set->pages);

		entry = &rb->entries[init_entry];
		entry->data = kaddr;
		entry->cacheline = pblk_cacheline_to_addr(init_entry++);
		entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

		set_size = (1 << order);
		for (i = 1; i < set_size; i++) {
			entry = &rb->entries[init_entry];
			entry->cacheline = pblk_cacheline_to_addr(init_entry++);
			entry->data = kaddr + (i * rb->seg_size);
			entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
			bio_list_init(&entry->w_ctx.bios);
		}

		list_add_tail(&page_set->list, &rb->pages);
		iter--;
	} while (iter > 0);
	up_write(&pblk_rb_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_set(&rb->inflight_flush_point, 0);
#endif

	/*
	 * Initialize rate-limiter, which controls access to the write buffer
	 * by user and GC I/O
	 */
	pblk_rl_init(&pblk->rl, rb->nr_entries);

	return 0;
}
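
/*
 * A worked example of the allocation split in pblk_rb_init(), assuming
 * MAX_ORDER = 11 (so max_order = 10): power_size = 12 (4096 entries)
 * exceeds max_order, so the loop allocates iter = 1 << (12 - 10) = 4 page
 * sets of order 10 (1024 pages each). Carving (1 << order) entries out of
 * each set covers the whole allocation because seg_size is pblk's 4KB
 * exposed page size, which matches PAGE_SIZE on typical configurations.
 */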

static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
	int flags;

	flags = READ_ONCE(w_ctx->flags);
	WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY),
			"pblk: overwriting unsubmitted data\n");

	/* Release flags on context. Protect from writes and reads */
	smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
	pblk_ppa_set_empty(&w_ctx->ppa);
	w_ctx->lba = ADDR_EMPTY;
}

#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
					(CIRC_SPACE(head, tail, size))
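
/*
 * CIRC_CNT(head, tail, size) expands to (head - tail) & (size - 1), and
 * CIRC_SPACE() always keeps one slot free, i.e. space = size - 1 - count.
 * For example, with size = 8, head = 2 and tail = 6: count =
 * (2 - 6) & 7 = 4 occupied entries, leaving 8 - 1 - 4 = 3 writable ones.
 */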

/*
 * Buffer space is calculated with respect to the sync pointer, which marks
 * the entries that have already been synchronized to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}

unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
			      unsigned int nr_entries)
{
	return (p + nr_entries) & (rb->nr_entries - 1);
}
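
/*
 * The mask works because rb->nr_entries is a power of two. For example,
 * with nr_entries = 1024, advancing p = 1020 by 8 entries yields
 * (1020 + 8) & 1023 = 4, i.e. the pointer wraps past the buffer end.
 */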

/*
 * The read count is calculated with respect to the submission pointer; it
 * counts the entries that are ready to be sent to the media.
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int subm = READ_ONCE(rb->subm);

	return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}

unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return pblk_rb_ring_count(mem, sync, rb->nr_entries);
}

unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
	unsigned int subm;

	subm = READ_ONCE(rb->subm);
	/* Commit read means updating submission pointer */
	smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries));

	return subm;
}
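
/*
 * The four ring pointers chase each other in a fixed order: mem (head of
 * entries written by producers) leads subm (entries submitted to the
 * media), which leads sync (entries persisted), which leads l2p_update
 * (entries whose l2p mapping has been pointed back at the device). This
 * ordering is implied by the accessors above rather than stated anywhere.
 */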

static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_line *line;
	struct pblk_rb_entry *entry;
	struct pblk_w_ctx *w_ctx;
	unsigned int user_io = 0, gc_io = 0;
	unsigned int i;
	int flags;

	for (i = 0; i < to_update; i++) {
		entry = &rb->entries[rb->l2p_update];
		w_ctx = &entry->w_ctx;

		flags = READ_ONCE(entry->w_ctx.flags);
		if (flags & PBLK_IOTYPE_USER)
			user_io++;
		else if (flags & PBLK_IOTYPE_GC)
			gc_io++;
		else
			WARN(1, "pblk: unknown IO type\n");

		pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
							entry->cacheline);

		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		kref_put(&line->ref, pblk_line_put);
		clean_wctx(w_ctx);
		rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1);
	}

	pblk_rl_out(&pblk->rl, user_io, gc_io);

	return 0;
}

/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write buffer
 * from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
			      unsigned int mem, unsigned int sync)
{
	unsigned int space, count;
	int ret = 0;

	lockdep_assert_held(&rb->w_lock);

	/* Update l2p only as buffer entries are being overwritten */
	space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
	if (space > nr_entries)
		goto out;

	count = nr_entries - space;
	/* l2p_update used exclusively under rb->w_lock */
	ret = __pblk_rb_update_l2p(rb, count);

out:
	return ret;
}

/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
	unsigned int sync;
	unsigned int to_update;

	spin_lock(&rb->w_lock);

	/* Protect from reads and writes */
	sync = smp_load_acquire(&rb->sync);

	to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
	__pblk_rb_update_l2p(rb, to_update);

	spin_unlock(&rb->w_lock);
}

/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
				  struct pblk_w_ctx w_ctx,
				  struct pblk_rb_entry *entry)
{
	memcpy(entry->data, data, rb->seg_size);

	entry->w_ctx.lba = w_ctx.lba;
	entry->w_ctx.ppa = w_ctx.ppa;
}

void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	int flags;

	entry = &rb->entries[ring_pos];
	flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must guarantee that the entry is free */
	BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

	__pblk_rb_write_entry(rb, data, w_ctx, entry);

	pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
	flags = w_ctx.flags | PBLK_WRITTEN_DATA;

	/* Release flags on write context. Protect from writes */
	smp_store_release(&entry->w_ctx.flags, flags);
}

void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
			    struct pblk_w_ctx w_ctx, struct pblk_line *line,
			    u64 paddr, unsigned int ring_pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	int flags;

	entry = &rb->entries[ring_pos];
	flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must guarantee that the entry is free */
	BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

	__pblk_rb_write_entry(rb, data, w_ctx, entry);

	if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
		entry->w_ctx.lba = ADDR_EMPTY;

	flags = w_ctx.flags | PBLK_WRITTEN_DATA;

	/* Release flags on write context. Protect from writes */
	smp_store_release(&entry->w_ctx.flags, flags);
}

static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
				   unsigned int pos)
{
	struct pblk_rb_entry *entry;
	unsigned int sync, flush_point;

	pblk_rb_sync_init(rb, NULL);
	sync = READ_ONCE(rb->sync);

	if (pos == sync) {
		pblk_rb_sync_end(rb, NULL);
		return 0;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_inc(&rb->inflight_flush_point);
#endif

	flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
	entry = &rb->entries[flush_point];

	/* Protect flush points */
	smp_store_release(&rb->flush_point, flush_point);

	if (bio)
		bio_list_add(&entry->w_ctx.bios, bio);

	pblk_rb_sync_end(rb, NULL);

	return bio ? 1 : 0;
}
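
/*
 * Flush point placement, by example: a flush at @pos means "everything
 * before @pos must reach the media", so the point is set one entry behind,
 * i.e. pos - 1, wrapping to nr_entries - 1 when pos == 0. When pos == sync
 * nothing is outstanding and no flush point is set at all.
 */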

static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
			       unsigned int *pos)
{
	unsigned int mem;
	unsigned int sync;
	unsigned int threshold;

	sync = READ_ONCE(rb->sync);
	mem = READ_ONCE(rb->mem);

	threshold = nr_entries + rb->back_thres;

	if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < threshold)
		return 0;

	if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
		return 0;

	*pos = mem;

	return 1;
}

static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
			     unsigned int *pos)
{
	if (!__pblk_rb_may_write(rb, nr_entries, pos))
		return 0;

	/* Protect from read count */
	smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries));
	return 1;
}
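
/*
 * A worked example of the back-pressure check in __pblk_rb_may_write():
 * with back_thres = 64, a write of nr_entries = 8 needs at least
 * 8 + 64 = 72 free entries between mem and sync. The reserve appears to
 * keep a tail of recently written entries resident in the buffer (the
 * caller derives the threshold from the device geometry).
 */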

void pblk_rb_flush(struct pblk_rb *rb)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	unsigned int mem = READ_ONCE(rb->mem);

	if (pblk_rb_flush_point_set(rb, NULL, mem))
		return;

	pblk_write_kick(pblk);
}

static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
				   unsigned int *pos, struct bio *bio,
				   int *io_ret)
{
	unsigned int mem;

	if (!__pblk_rb_may_write(rb, nr_entries, pos))
		return 0;

	mem = pblk_rb_ptr_wrap(rb, *pos, nr_entries);
	*io_ret = NVM_IO_DONE;

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct pblk *pblk = container_of(rb, struct pblk, rwb);

		atomic64_inc(&pblk->nr_flush);
		if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
			*io_ret = NVM_IO_OK;
	}

	/* Protect from read count */
	smp_store_release(&rb->mem, mem);

	return 1;
}
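
/*
 * Return-code semantics for the flush handling above: *io_ret starts as
 * NVM_IO_DONE (the bio can be completed as soon as it is copied). If the
 * bio carries REQ_PREFLUSH and a flush point was set with the bio
 * attached, it is flipped to NVM_IO_OK so the caller defers completion
 * until the flush point syncs to the media.
 */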

/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	int io_ret;

	spin_lock(&rb->w_lock);
	io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
	if (io_ret) {
		spin_unlock(&rb->w_lock);
		return io_ret;
	}

	if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
		spin_unlock(&rb->w_lock);
		return NVM_IO_REQUEUE;
	}

	pblk_rl_user_in(&pblk->rl, nr_entries);
	spin_unlock(&rb->w_lock);

	return io_ret;
}

/*
 * See the comment on pblk_rb_may_write_user().
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);

	spin_lock(&rb->w_lock);
	if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
		spin_unlock(&rb->w_lock);
		return 0;
	}

	if (!pblk_rb_may_write(rb, nr_entries, pos)) {
		spin_unlock(&rb->w_lock);
		return 0;
	}

	pblk_rl_gc_in(&pblk->rl, nr_entries);
	spin_unlock(&rb->w_lock);

	return 1;
}

/*
 * Read available entries from the write buffer and add them to the given bio.
 * To avoid a memory copy, a page reference to the write buffer is added to
 * the bio instead.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
				 unsigned int pos, unsigned int nr_entries,
				 unsigned int count)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct request_queue *q = pblk->dev->q;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;
	struct pblk_rb_entry *entry;
	struct page *page;
	unsigned int pad = 0, to_read = nr_entries;
	unsigned int i;
	int flags;

	if (count < nr_entries) {
		pad = nr_entries - count;
		to_read = count;
	}

	c_ctx->sentry = pos;
	c_ctx->nr_valid = to_read;
	c_ctx->nr_padded = pad;

	for (i = 0; i < to_read; i++) {
		entry = &rb->entries[pos];

		/* A write has been allowed into the buffer, but data is still
		 * being copied to it. It is ok to busy wait.
		 */
try:
		flags = READ_ONCE(entry->w_ctx.flags);
		if (!(flags & PBLK_WRITTEN_DATA)) {
			io_schedule();
			goto try;
		}

		page = virt_to_page(entry->data);
		if (!page) {
			pblk_err(pblk, "could not allocate write bio page\n");
			flags &= ~PBLK_WRITTEN_DATA;
			flags |= PBLK_SUBMITTED_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&entry->w_ctx.flags, flags);
			return NVM_IO_ERR;
		}

		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
								rb->seg_size) {
			pblk_err(pblk, "could not add page to write bio\n");
			flags &= ~PBLK_WRITTEN_DATA;
			flags |= PBLK_SUBMITTED_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&entry->w_ctx.flags, flags);
			return NVM_IO_ERR;
		}

		flags &= ~PBLK_WRITTEN_DATA;
		flags |= PBLK_SUBMITTED_ENTRY;

		/* Release flags on context. Protect from writes */
		smp_store_release(&entry->w_ctx.flags, flags);

		pos = pblk_rb_ptr_wrap(rb, pos, 1);
	}

	if (pad) {
		if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
			pblk_err(pblk, "could not pad page in write bio\n");
			return NVM_IO_ERR;
		}

		if (pad < pblk->min_write_pgs)
			atomic64_inc(&pblk->pad_dist[pad - 1]);
		else
			pblk_warn(pblk, "padding more than min. sectors\n");

		atomic64_add(pad, &pblk->pad_wa);
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(pad, &pblk->padded_writes);
#endif

	return NVM_IO_OK;
}
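
/*
 * Padding, by example: if the write thread asks for nr_entries = 8 but
 * only count = 5 entries are available, to_read = 5 and pad = 3; the three
 * pad pages are allocated via pblk_bio_add_pages() so that the bio still
 * fills the device's minimum write unit (the caller sizes nr_entries
 * accordingly).
 */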

/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
			struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr l2p_ppa;
	u64 pos = pblk_addr_to_cacheline(ppa);
	void *data;
	int flags;
	int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must ensure that the access will not cause an overflow */
	BUG_ON(pos >= rb->nr_entries);
#endif
	entry = &rb->entries[pos];
	w_ctx = &entry->w_ctx;
	flags = READ_ONCE(w_ctx->flags);

	spin_lock(&rb->w_lock);
	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	/* Check if the entry has been overwritten or is scheduled to be */
	if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
	    flags & PBLK_WRITABLE_ENTRY) {
		ret = 0;
		goto out;
	}

	/* Only advance the bio if it hasn't been advanced already. If advanced,
	 * this bio is at least a partial bio (i.e., it has partially been
	 * filled with data from the cache). If part of the data resides on the
	 * media, we will read later on
	 */
	if (unlikely(!advanced_bio))
		bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);

	data = bio_data(bio);
	memcpy(data, entry->data, rb->seg_size);

out:
	spin_unlock(&rb->w_lock);
	return ret;
}

struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
	unsigned int entry = pblk_rb_ptr_wrap(rb, pos, 0);

	return &rb->entries[entry].w_ctx;
}

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
	__acquires(&rb->s_lock)
{
	if (flags)
		spin_lock_irqsave(&rb->s_lock, *flags);
	else
		spin_lock_irq(&rb->s_lock);

	return rb->sync;
}

void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
	__releases(&rb->s_lock)
{
	lockdep_assert_held(&rb->s_lock);

	if (flags)
		spin_unlock_irqrestore(&rb->s_lock, *flags);
	else
		spin_unlock_irq(&rb->s_lock);
}

unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
	unsigned int sync, flush_point;
	lockdep_assert_held(&rb->s_lock);

	sync = READ_ONCE(rb->sync);
	flush_point = READ_ONCE(rb->flush_point);

	if (flush_point != EMPTY_ENTRY) {
		unsigned int secs_to_flush;

		secs_to_flush = pblk_rb_ring_count(flush_point, sync,
					rb->nr_entries);
		if (secs_to_flush < nr_entries) {
			/* Protect flush points */
			smp_store_release(&rb->flush_point, EMPTY_ENTRY);
		}
	}

	sync = pblk_rb_ptr_wrap(rb, sync, nr_entries);

	/* Protect from counts */
	smp_store_release(&rb->sync, sync);

	return sync;
}
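
/*
 * For example: with sync = 2, flush_point = 5 and nr_entries = 4 being
 * advanced, secs_to_flush = (5 - 2) & mask = 3 < 4, so the flush point is
 * cleared before sync moves past it; any bios parked on that entry are
 * completed from the write completion path.
 */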

/* Calculate how many sectors to submit up to the current flush point. */
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
	unsigned int subm, sync, flush_point;
	unsigned int submitted, to_flush;

	/* Protect flush points */
	flush_point = smp_load_acquire(&rb->flush_point);
	if (flush_point == EMPTY_ENTRY)
		return 0;

	/* Protect syncs */
	sync = smp_load_acquire(&rb->sync);

	subm = READ_ONCE(rb->subm);
	submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);

	/* The sync point itself counts as a sector to sync */
	to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;

	return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
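
/*
 * Worked example: with nr_entries = 8, sync = 2, subm = 3 and
 * flush_point = 5, submitted = (3 - 2) & 7 = 1 and to_flush =
 * ((5 - 2) & 7) + 1 = 4 (the flush point entry itself counts), so
 * 4 - 1 = 3 more sectors must be submitted to satisfy the flush.
 */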

int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
	struct pblk_rb_entry *entry;
	int i;
	int ret = 0;

	spin_lock(&rb->w_lock);
	spin_lock_irq(&rb->s_lock);

	if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
				(rb->sync == rb->l2p_update) &&
				(rb->flush_point == EMPTY_ENTRY)) {
		goto out;
	}

	if (!rb->entries) {
		ret = 1;
		goto out;
	}

	for (i = 0; i < rb->nr_entries; i++) {
		entry = &rb->entries[i];

		if (!entry->data) {
			ret = 1;
			goto out;
		}
	}

out:
	spin_unlock(&rb->w_lock);
	spin_unlock_irq(&rb->s_lock);

	return ret;
}

unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
	return (pos & (rb->nr_entries - 1));
}

int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
	return (pos >= rb->nr_entries);
}

ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_c_ctx *c;
	ssize_t offset;
	int queued_entries = 0;

	spin_lock_irq(&rb->s_lock);
	list_for_each_entry(c, &pblk->compl_list, list)
		queued_entries++;
	spin_unlock_irq(&rb->s_lock);

	if (rb->flush_point != EMPTY_ENTRY)
		offset = scnprintf(buf, PAGE_SIZE,
			"%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
			rb->nr_entries,
			rb->mem,
			rb->subm,
			rb->sync,
			rb->l2p_update,
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_read(&rb->inflight_flush_point),
#else
			0,
#endif
			rb->flush_point,
			pblk_rb_read_count(rb),
			pblk_rb_space(rb),
			pblk_rb_flush_point_count(rb),
			queued_entries);
	else
		offset = scnprintf(buf, PAGE_SIZE,
			"%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
			rb->nr_entries,
			rb->mem,
			rb->subm,
			rb->sync,
			rb->l2p_update,
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_read(&rb->inflight_flush_point),
#else
			0,
#endif
			pblk_rb_read_count(rb),
			pblk_rb_space(rb),
			pblk_rb_flush_point_count(rb),
			queued_entries);

	return offset;
}