/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */

#include <linux/circ_buf.h>

#include "pblk.h"

static DECLARE_RWSEM(pblk_rb_lock);
void pblk_rb_data_free(struct pblk_rb *rb)
{
        struct pblk_rb_pages *p, *t;

        down_write(&pblk_rb_lock);
        list_for_each_entry_safe(p, t, &rb->pages, list) {
                free_pages((unsigned long)page_address(p->pages), p->order);
                list_del(&p->list);
                kfree(p);
        }
        up_write(&pblk_rb_lock);
}
/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/core-api/circular-buffers.rst)
 */
int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
                 unsigned int power_size, unsigned int power_seg_sz)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int init_entry = 0;
        unsigned int alloc_order = power_size;
        unsigned int max_order = MAX_ORDER - 1;
        unsigned int order, iter;

        down_write(&pblk_rb_lock);
        rb->entries = rb_entry_base;
        rb->seg_size = (1 << power_seg_sz);
        rb->nr_entries = (1 << power_size);
        rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
        rb->flush_point = EMPTY_ENTRY;

        spin_lock_init(&rb->w_lock);
        spin_lock_init(&rb->s_lock);

        INIT_LIST_HEAD(&rb->pages);

        if (alloc_order >= max_order) {
                order = max_order;
                iter = (1 << (alloc_order - max_order));
        } else {
                order = alloc_order;
                iter = 1;
        }

        do {
                struct pblk_rb_entry *entry;
                struct pblk_rb_pages *page_set;
                void *kaddr;
                unsigned long set_size;
                int i;

                page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
                if (!page_set) {
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }

                page_set->order = order;
                page_set->pages = alloc_pages(GFP_KERNEL, order);
                if (!page_set->pages) {
                        kfree(page_set);
                        pblk_rb_data_free(rb);
                        up_write(&pblk_rb_lock);
                        return -ENOMEM;
                }
                kaddr = page_address(page_set->pages);

                entry = &rb->entries[init_entry];
                entry->data = kaddr;
                entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

                set_size = (1 << order);
                for (i = 1; i < set_size; i++) {
                        entry = &rb->entries[init_entry];
                        entry->cacheline = pblk_cacheline_to_addr(init_entry++);
                        entry->data = kaddr + (i * rb->seg_size);
                        entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
                        bio_list_init(&entry->w_ctx.bios);
                }

                list_add_tail(&page_set->list, &rb->pages);
                iter--;
        } while (iter > 0);
        up_write(&pblk_rb_lock);

#ifdef CONFIG_NVM_DEBUG
        atomic_set(&rb->inflight_flush_point, 0);
#endif

        /*
         * Initialize rate-limiter, which controls access to the write buffer
         * by user and GC I/O
         */
        pblk_rl_init(&pblk->rl, rb->nr_entries);

        return 0;
}
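/*
 * Sizing sketch (illustrative only, not built): the entry array is allocated
 * by the caller and both the entry count and the segment size are passed as
 * powers of two. The variable names below (entries, power_size, power_seg_sz)
 * are assumptions for the example; with power_size = 7 and power_seg_sz = 12
 * the buffer holds 128 entries of 4 KB each (512 KB of buffered data).
 */
#if 0
        struct pblk_rb_entry *entries;
        unsigned int power_size = 7;            /* 1 << 7  = 128 entries */
        unsigned int power_seg_sz = 12;         /* 1 << 12 = 4096 B segments */
        int ret;

        entries = vzalloc((1 << power_size) * sizeof(struct pblk_rb_entry));
        if (!entries)
                return -ENOMEM;

        ret = pblk_rb_init(&pblk->rwb, entries, power_size, power_seg_sz);
#endif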
/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
unsigned int pblk_rb_calculate_size(unsigned int nr_entries)
{
        /* Alloc a write buffer that can at least fit 128 entries */
        return (1 << max(get_count_order(nr_entries), 7));
}
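/*
 * Example (illustrative): get_count_order(100) = 7, so a request for 100
 * entries is rounded up to 1 << 7 = 128 entries; a request for 300 entries
 * gives get_count_order(300) = 9, i.e. a 512-entry buffer. Requests below
 * 128 entries are clamped to the 128-entry minimum by the max() above.
 */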
void *pblk_rb_entries_ref(struct pblk_rb *rb)
{
        return rb->entries;
}
static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
        int flags;

        flags = READ_ONCE(w_ctx->flags);
        WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY),
                        "pblk: overwriting unsubmitted data\n");

        /* Release flags on context. Protect from writes and reads */
        smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
        pblk_ppa_set_empty(&w_ctx->ppa);
        w_ctx->lba = ADDR_EMPTY;
}
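/*
 * Note on memory ordering (applies throughout this file): entry flags are
 * published with smp_store_release() and read with READ_ONCE() or
 * smp_load_acquire(). A reader that observes PBLK_WRITTEN_DATA is therefore
 * also guaranteed to observe the data that was copied into entry->data
 * before the flag was set.
 */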
#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
                                        (CIRC_SPACE(head, tail, size))
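/*
 * Example (illustrative): with nr_entries = 8, head (mem) = 6 and
 * tail (sync) = 2, CIRC_CNT(6, 2, 8) = 4 entries are in flight and
 * CIRC_SPACE(6, 2, 8) = 3 entries can still be written - one slot is
 * always kept free so that head == tail unambiguously means "empty".
 */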
/*
 * Buffer space is calculated with respect to the back pointer signaling
 * synchronized entries to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}
/*
 * Buffer count is calculated with respect to the submission entry signaling
 * the entries that are available to be sent to the media.
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int subm = READ_ONCE(rb->subm);

        return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}
unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
{
        unsigned int mem = READ_ONCE(rb->mem);
        unsigned int sync = READ_ONCE(rb->sync);

        return pblk_rb_ring_count(mem, sync, rb->nr_entries);
}
unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int subm;

        subm = READ_ONCE(rb->subm);
        /* Commit read means updating submission pointer */
        smp_store_release(&rb->subm,
                                (subm + nr_entries) & (rb->nr_entries - 1));

        return subm;
}
static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_line *line;
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        unsigned int user_io = 0, gc_io = 0;
        unsigned int i;
        int flags;

        for (i = 0; i < to_update; i++) {
                entry = &rb->entries[rb->l2p_update];
                w_ctx = &entry->w_ctx;

                flags = READ_ONCE(entry->w_ctx.flags);
                if (flags & PBLK_IOTYPE_USER)
                        user_io++;
                else if (flags & PBLK_IOTYPE_GC)
                        gc_io++;
                else
                        WARN(1, "pblk: unknown IO type\n");

                pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
                                                        entry->cacheline);

                line = &pblk->lines[pblk_ppa_to_line(w_ctx->ppa)];
                kref_put(&line->ref, pblk_line_put);
                clean_wctx(w_ctx);
                rb->l2p_update = (rb->l2p_update + 1) & (rb->nr_entries - 1);
        }

        pblk_rl_out(&pblk->rl, user_io, gc_io);

        return 0;
}
/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write
 * buffer from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
                              unsigned int mem, unsigned int sync)
{
        unsigned int space, count;
        int ret = 0;

        lockdep_assert_held(&rb->w_lock);

        /* Update l2p only as buffer entries are being overwritten */
        space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
        if (space > nr_entries)
                goto out;

        count = nr_entries - space;
        /* l2p_update used exclusively under rb->w_lock */
        ret = __pblk_rb_update_l2p(rb, count);

out:
        return ret;
}
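/*
 * Example (illustrative): with a 128-entry buffer and mem 10 entries ahead
 * of l2p_update, pblk_rb_ring_space() returns 117. A 4-entry write fits
 * without touching entries whose l2p mapping still points to the cache, so
 * nothing is done; a 120-entry write forces __pblk_rb_update_l2p() to
 * advance the pointer by 120 - 117 = 3 entries first.
 */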
/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
        unsigned int sync;
        unsigned int to_update;

        spin_lock(&rb->w_lock);

        /* Protect from reads and writes */
        sync = smp_load_acquire(&rb->sync);

        to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
        __pblk_rb_update_l2p(rb, to_update);

        spin_unlock(&rb->w_lock);
}
/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
                                  struct pblk_w_ctx w_ctx,
                                  struct pblk_rb_entry *entry)
{
        memcpy(entry->data, data, rb->seg_size);

        entry->w_ctx.lba = w_ctx.lba;
        entry->w_ctx.ppa = w_ctx.ppa;
}
void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
                              struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}
void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
                            struct pblk_w_ctx w_ctx, struct pblk_line *line,
                            u64 paddr, unsigned int ring_pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        int flags;

        entry = &rb->entries[ring_pos];
        flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_DEBUG
        /* Caller must guarantee that the entry is free */
        BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

        __pblk_rb_write_entry(rb, data, w_ctx, entry);

        if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
                entry->w_ctx.lba = ADDR_EMPTY;

        flags = w_ctx.flags | PBLK_WRITTEN_DATA;

        /* Release flags on write context. Protect from writes */
        smp_store_release(&entry->w_ctx.flags, flags);
}
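/*
 * Note: an entry's flags follow the cycle PBLK_WRITABLE_ENTRY (free) ->
 * PBLK_WRITTEN_DATA (data copied in, set by the two helpers above) ->
 * PBLK_SUBMITTED_ENTRY (picked up by pblk_rb_read_to_bio()) and back to
 * PBLK_WRITABLE_ENTRY in clean_wctx() once the l2p table has been updated.
 */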
static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
                                   unsigned int pos)
{
        struct pblk_rb_entry *entry;
        unsigned int sync, flush_point;

        pblk_rb_sync_init(rb, NULL);
        sync = READ_ONCE(rb->sync);

        if (pos == sync) {
                pblk_rb_sync_end(rb, NULL);
                return 0;
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_inc(&rb->inflight_flush_point);
#endif

        flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
        entry = &rb->entries[flush_point];

        /* Protect flush points */
        smp_store_release(&rb->flush_point, flush_point);

        if (bio)
                bio_list_add(&entry->w_ctx.bios, bio);

        pblk_rb_sync_end(rb, NULL);

        return bio ? 1 : 0;
}
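/*
 * Example (illustrative): if a flush is requested when the write position
 * is 20, the flush point is placed on entry 19 - the last entry that must
 * reach the media before the flush can complete. If the position is 0, the
 * flush point wraps around to nr_entries - 1.
 */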
static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                               unsigned int *pos)
{
        unsigned int mem;
        unsigned int sync;

        sync = READ_ONCE(rb->sync);
        mem = READ_ONCE(rb->mem);

        if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < nr_entries)
                return 0;

        if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
                return 0;

        *pos = mem;

        return 1;
}
static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
                             unsigned int *pos)
{
        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        /* Protect from read count */
        smp_store_release(&rb->mem, (*pos + nr_entries) & (rb->nr_entries - 1));
        return 1;
}
void pblk_rb_flush(struct pblk_rb *rb)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        unsigned int mem = READ_ONCE(rb->mem);

        if (pblk_rb_flush_point_set(rb, NULL, mem))
                return;

        pblk_write_kick(pblk);
}
static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
                                   unsigned int *pos, struct bio *bio,
                                   int *io_ret)
{
        unsigned int mem;

        if (!__pblk_rb_may_write(rb, nr_entries, pos))
                return 0;

        mem = (*pos + nr_entries) & (rb->nr_entries - 1);
        *io_ret = NVM_IO_DONE;

        if (bio->bi_opf & REQ_PREFLUSH) {
                struct pblk *pblk = container_of(rb, struct pblk, rwb);

                atomic64_inc(&pblk->nr_flush);
                if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
                        *io_ret = NVM_IO_OK;
        }

        /* Protect from read count */
        smp_store_release(&rb->mem, mem);

        return 1;
}
/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
                           unsigned int nr_entries, unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        int io_ret;

        spin_lock(&rb->w_lock);
        io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
        if (io_ret) {
                spin_unlock(&rb->w_lock);
                return io_ret;
        }

        if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
                spin_unlock(&rb->w_lock);
                return NVM_IO_REQUEUE;
        }

        pblk_rl_user_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return io_ret;
}
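/*
 * Usage sketch (illustrative only, not built; the variable names and the
 * surrounding write path are assumptions, not code from this driver): a
 * writer first reserves buffer space and only then copies its data into
 * the reserved slots.
 */
#if 0
        struct pblk_w_ctx w_ctx = { .flags = PBLK_IOTYPE_USER };
        unsigned int bpos, pos;
        unsigned int i;
        int ret;

        ret = pblk_rb_may_write_user(&pblk->rwb, bio, nr_entries, &bpos);
        if (ret != NVM_IO_OK && ret != NVM_IO_DONE)
                return ret;

        for (i = 0; i < nr_entries; i++) {
                w_ctx.lba = lba + i;
                pos = pblk_rb_wrap_pos(&pblk->rwb, bpos + i);
                pblk_rb_write_entry_user(&pblk->rwb, bio_data(bio), w_ctx, pos);
                bio_advance(bio, PBLK_EXPOSED_PAGE_SIZE);
        }
#endif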
/*
 * See the pblk_rb_may_write_user comment.
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
                         unsigned int *pos)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);

        spin_lock(&rb->w_lock);
        if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        if (!pblk_rb_may_write(rb, nr_entries, pos)) {
                spin_unlock(&rb->w_lock);
                return 0;
        }

        pblk_rl_gc_in(&pblk->rl, nr_entries);
        spin_unlock(&rb->w_lock);

        return 1;
}
/*
 * Read available entries on rb and add them to the given bio. To avoid a
 * memory copy, a page reference to the write buffer is added to the bio
 * instead.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
                                 unsigned int pos, unsigned int nr_entries,
                                 unsigned int count)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct request_queue *q = pblk->dev->q;
        struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
        struct bio *bio = rqd->bio;
        struct pblk_rb_entry *entry;
        struct page *page;
        unsigned int pad = 0, to_read = nr_entries;
        unsigned int i;
        int flags;

        if (count < nr_entries) {
                pad = nr_entries - count;
                to_read = count;
        }

        c_ctx->sentry = pos;
        c_ctx->nr_valid = to_read;
        c_ctx->nr_padded = pad;

        for (i = 0; i < to_read; i++) {
                entry = &rb->entries[pos];

                /* A write has been allowed into the buffer, but data is still
                 * being copied to it. It is ok to busy wait.
                 */
try:
                flags = READ_ONCE(entry->w_ctx.flags);
                if (!(flags & PBLK_WRITTEN_DATA)) {
                        io_schedule();
                        goto try;
                }

                page = virt_to_page(entry->data);
                if (!page) {
                        pr_err("pblk: could not allocate write bio page\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
                                                                rb->seg_size) {
                        pr_err("pblk: could not add page to write bio\n");
                        flags &= ~PBLK_WRITTEN_DATA;
                        flags |= PBLK_SUBMITTED_ENTRY;
                        /* Release flags on context. Protect from writes */
                        smp_store_release(&entry->w_ctx.flags, flags);
                        return NVM_IO_ERR;
                }

                flags &= ~PBLK_WRITTEN_DATA;
                flags |= PBLK_SUBMITTED_ENTRY;

                /* Release flags on context. Protect from writes */
                smp_store_release(&entry->w_ctx.flags, flags);

                pos = (pos + 1) & (rb->nr_entries - 1);
        }

        if (pad) {
                if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
                        pr_err("pblk: could not pad page in write bio\n");
                        return NVM_IO_ERR;
                }

                if (pad < pblk->min_write_pgs)
                        atomic64_inc(&pblk->pad_dist[pad - 1]);
                else
                        pr_warn("pblk: padding more than min. sectors\n");

                atomic64_add(pad, &pblk->pad_wa);
        }

#ifdef CONFIG_NVM_DEBUG
        atomic_long_add(pad, &pblk->padded_writes);
#endif

        return NVM_IO_OK;
}
/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
                        struct ppa_addr ppa, int bio_iter, bool advanced_bio)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_rb_entry *entry;
        struct pblk_w_ctx *w_ctx;
        struct ppa_addr l2p_ppa;
        u64 pos = pblk_addr_to_cacheline(ppa);
        void *data;
        int flags;
        int ret = 1;

#ifdef CONFIG_NVM_DEBUG
        /* Caller must ensure that the access will not cause an overflow */
        BUG_ON(pos >= rb->nr_entries);
#endif
        entry = &rb->entries[pos];
        w_ctx = &entry->w_ctx;
        flags = READ_ONCE(w_ctx->flags);

        spin_lock(&rb->w_lock);
        spin_lock(&pblk->trans_lock);
        l2p_ppa = pblk_trans_map_get(pblk, lba);
        spin_unlock(&pblk->trans_lock);

        /* Check if the entry has been overwritten or is scheduled to be */
        if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
                                                flags & PBLK_WRITABLE_ENTRY) {
                ret = 0;
                goto out;
        }

        /* Only advance the bio if it hasn't been advanced already. If advanced,
         * this bio is at least a partial bio (i.e., it has partially been
         * filled with data from the cache). If part of the data resides on the
         * media, we will read it later on.
         */
        if (unlikely(!advanced_bio))
                bio_advance(bio, bio_iter * PBLK_EXPOSED_PAGE_SIZE);

        data = bio_data(bio);
        memcpy(data, entry->data, rb->seg_size);

out:
        spin_unlock(&rb->w_lock);
        return ret;
}
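/*
 * Note: a return value of 0 means the cached entry no longer matches the
 * lba (it has been overwritten or is about to be reused), in which case the
 * caller is expected to direct the read to the device instead, as described
 * in the comment above.
 */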
struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
        unsigned int entry = pos & (rb->nr_entries - 1);

        return &rb->entries[entry].w_ctx;
}
unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
        __acquires(&rb->s_lock)
{
        if (flags)
                spin_lock_irqsave(&rb->s_lock, *flags);
        else
                spin_lock_irq(&rb->s_lock);

        return rb->sync;
}
void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
        __releases(&rb->s_lock)
{
        lockdep_assert_held(&rb->s_lock);

        if (flags)
                spin_unlock_irqrestore(&rb->s_lock, *flags);
        else
                spin_unlock_irq(&rb->s_lock);
}
unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
        unsigned int sync, flush_point;

        lockdep_assert_held(&rb->s_lock);

        sync = READ_ONCE(rb->sync);
        flush_point = READ_ONCE(rb->flush_point);

        if (flush_point != EMPTY_ENTRY) {
                unsigned int secs_to_flush;

                secs_to_flush = pblk_rb_ring_count(flush_point, sync,
                                        rb->nr_entries);
                if (secs_to_flush < nr_entries) {
                        /* Protect flush points */
                        smp_store_release(&rb->flush_point, EMPTY_ENTRY);
                }
        }

        sync = (sync + nr_entries) & (rb->nr_entries - 1);

        /* Protect from counts */
        smp_store_release(&rb->sync, sync);

        return sync;
}
/* Calculate how many sectors to submit up to the current flush point. */
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
        unsigned int subm, sync, flush_point;
        unsigned int submitted, to_flush;

        /* Protect flush points */
        flush_point = smp_load_acquire(&rb->flush_point);
        if (flush_point == EMPTY_ENTRY)
                return 0;

        /* Protect syncs */
        sync = smp_load_acquire(&rb->sync);

        subm = READ_ONCE(rb->subm);
        submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);

        /* The sync point itself counts as a sector to sync */
        to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;

        return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
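/*
 * Example (illustrative): with sync = 10, subm = 14 and flush_point = 17,
 * 4 entries have been submitted but not yet synced, and 8 entries
 * (positions 10 through 17) must be synced to satisfy the flush, so
 * 8 - 4 = 4 entries still need to be submitted.
 */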
/*
 * Scan from the current position of the sync pointer to find the entry that
 * corresponds to the given ppa. This is necessary since write requests can be
 * completed out of order. The assumption is that the ppa is close to the sync
 * pointer thus the search will not take long.
 *
 * The caller of this function must guarantee that the sync pointer will not
 * reach the entry while it is using the metadata associated with it. With
 * this assumption in mind, there is no need to take the sync lock.
 */
struct pblk_rb_entry *pblk_rb_sync_scan_entry(struct pblk_rb *rb,
                                              struct ppa_addr *ppa)
{
        unsigned int sync, subm, count;
        unsigned int i;

        sync = READ_ONCE(rb->sync);
        subm = READ_ONCE(rb->subm);
        count = pblk_rb_ring_count(subm, sync, rb->nr_entries);

        for (i = 0; i < count; i++)
                sync = (sync + 1) & (rb->nr_entries - 1);

        return NULL;
}
int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
        struct pblk_rb_entry *entry;
        int i;
        int ret = 0;

        spin_lock(&rb->w_lock);
        spin_lock_irq(&rb->s_lock);

        if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
                                (rb->sync == rb->l2p_update) &&
                                (rb->flush_point == EMPTY_ENTRY)) {
                goto out;
        }

        if (!rb->entries) {
                ret = 1;
                goto out;
        }

        for (i = 0; i < rb->nr_entries; i++) {
                entry = &rb->entries[i];

                if (!entry->data) {
                        ret = 1;
                        goto out;
                }
        }

out:
        spin_unlock(&rb->w_lock);
        spin_unlock_irq(&rb->s_lock);

        return ret;
}
unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
        return (pos & (rb->nr_entries - 1));
}
int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
        return (pos >= rb->nr_entries);
}
ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
        struct pblk *pblk = container_of(rb, struct pblk, rwb);
        struct pblk_c_ctx *c;
        ssize_t offset;
        int queued_entries = 0;

        spin_lock_irq(&rb->s_lock);
        list_for_each_entry(c, &pblk->compl_list, list)
                queued_entries++;
        spin_unlock_irq(&rb->s_lock);

        if (rb->flush_point != EMPTY_ENTRY)
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_flush_point),
#else
                        0,
#endif
                        rb->flush_point,
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_flush_point_count(rb),
                        queued_entries);
        else
                offset = scnprintf(buf, PAGE_SIZE,
                        "%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
                        rb->nr_entries,
                        rb->mem,
                        rb->subm,
                        rb->sync,
                        rb->l2p_update,
#ifdef CONFIG_NVM_DEBUG
                        atomic_read(&rb->inflight_flush_point),
#else
                        0,
#endif
                        pblk_rb_read_count(rb),
                        pblk_rb_space(rb),
                        pblk_rb_flush_point_count(rb),
                        queued_entries);

        return offset;
}