// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *
 * Based upon the circular ringbuffer.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * pblk-rb.c - pblk's write buffer
 */

#include <linux/circ_buf.h>

#include "pblk.h"

static DECLARE_RWSEM(pblk_rb_lock);

static void pblk_rb_data_free(struct pblk_rb *rb)
{
	struct pblk_rb_pages *p, *t;

	down_write(&pblk_rb_lock);
	list_for_each_entry_safe(p, t, &rb->pages, list) {
		free_pages((unsigned long)page_address(p->pages), p->order);
		list_del(&p->list);
		kfree(p);
	}
	up_write(&pblk_rb_lock);
}

void pblk_rb_free(struct pblk_rb *rb)
{
	pblk_rb_data_free(rb);
	vfree(rb->entries);
}

/*
 * pblk_rb_calculate_size -- calculate the size of the write buffer
 */
static unsigned int pblk_rb_calculate_size(unsigned int nr_entries,
					   unsigned int threshold)
{
	unsigned int thr_sz = 1 << (get_count_order(threshold + NVM_MAX_VLBA));
	unsigned int max_sz = max(thr_sz, nr_entries);
	unsigned int max_io;

	/* Alloc a write buffer that can (i) fit at least two split bios
	 * (considering the max I/O size NVM_MAX_VLBA), and (ii) guarantee
	 * that the threshold will be respected.
	 */
	max_io = (1 << max((int)(get_count_order(max_sz)),
				(int)(get_count_order(NVM_MAX_VLBA << 1))));
	if ((threshold + NVM_MAX_VLBA) >= max_io)
		max_io <<= 1;

	return max_io;
}
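
/*
 * Worked example of the sizing above (illustrative only; it assumes
 * NVM_MAX_VLBA == 64): with nr_entries = 256 and threshold = 64,
 * thr_sz = 1 << get_count_order(128) = 128 and max_sz = max(128, 256) = 256,
 * so max_io = 1 << max(8, 7) = 256. Since threshold + NVM_MAX_VLBA = 128 is
 * smaller than 256, the buffer is sized to 256 entries; had the sum reached
 * max_io, the size would be doubled so that the back-pressure threshold can
 * always be honoured.
 */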

/*
 * Initialize ring buffer. The data and metadata buffers must be previously
 * allocated and their size must be a power of two
 * (Documentation/core-api/circular-buffers.rst)
 */
int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,
		 unsigned int seg_size)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entries;
	unsigned int init_entry = 0;
	unsigned int max_order = MAX_ORDER - 1;
	unsigned int power_size, power_seg_sz;
	unsigned int alloc_order, order, iter;
	unsigned int nr_entries;

	nr_entries = pblk_rb_calculate_size(size, threshold);
	entries = vzalloc(array_size(nr_entries, sizeof(struct pblk_rb_entry)));
	if (!entries)
		return -ENOMEM;

	power_size = get_count_order(nr_entries);
	power_seg_sz = get_count_order(seg_size);

	down_write(&pblk_rb_lock);
	rb->entries = entries;
	rb->seg_size = (1 << power_seg_sz);
	rb->nr_entries = (1 << power_size);
	rb->mem = rb->subm = rb->sync = rb->l2p_update = 0;
	rb->back_thres = threshold;
	rb->flush_point = EMPTY_ENTRY;

	spin_lock_init(&rb->w_lock);
	spin_lock_init(&rb->s_lock);

	INIT_LIST_HEAD(&rb->pages);

	alloc_order = power_size;
	if (alloc_order >= max_order) {
		order = max_order;
		iter = (1 << (alloc_order - max_order));
	} else {
		order = alloc_order;
		iter = 1;
	}

	do {
		struct pblk_rb_entry *entry;
		struct pblk_rb_pages *page_set;
		void *kaddr;
		unsigned long set_size;
		int i;

		page_set = kmalloc(sizeof(struct pblk_rb_pages), GFP_KERNEL);
		if (!page_set) {
			up_write(&pblk_rb_lock);
			vfree(entries);
			return -ENOMEM;
		}

		page_set->order = order;
		page_set->pages = alloc_pages(GFP_KERNEL, order);
		if (!page_set->pages) {
			kfree(page_set);
			pblk_rb_data_free(rb);
			up_write(&pblk_rb_lock);
			vfree(entries);
			return -ENOMEM;
		}
		kaddr = page_address(page_set->pages);

		entry = &rb->entries[init_entry];
		entry->data = kaddr;
		entry->cacheline = pblk_cacheline_to_addr(init_entry++);
		entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;

		set_size = (1 << order);
		for (i = 1; i < set_size; i++) {
			entry = &rb->entries[init_entry];
			entry->cacheline = pblk_cacheline_to_addr(init_entry++);
			entry->data = kaddr + (i * rb->seg_size);
			entry->w_ctx.flags = PBLK_WRITABLE_ENTRY;
			bio_list_init(&entry->w_ctx.bios);
		}

		list_add_tail(&page_set->list, &rb->pages);
		iter--;
	} while (iter > 0);
	up_write(&pblk_rb_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_set(&rb->inflight_flush_point, 0);
#endif

	/* Initialize rate-limiter, which controls access to the write buffer */
	pblk_rl_init(&pblk->rl, rb->nr_entries, threshold);

	return 0;
}
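
/*
 * Illustrative note on the resulting layout (assumes seg_size == 4096):
 * after pblk_rb_init(), every buffer entry owns one seg_size-sized slot
 * inside a physically contiguous page set, e.g. with order = 2 one page set
 * backs 4 consecutive entries, entry i pointing at kaddr + i * 4096. The
 * entry index doubles as the "cacheline" address produced by
 * pblk_cacheline_to_addr() and stored in the L2P table, which is how reads
 * can later locate cached data through pblk_rb_copy_to_bio().
 */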

static void clean_wctx(struct pblk_w_ctx *w_ctx)
{
	int flags;

	flags = READ_ONCE(w_ctx->flags);
	WARN_ONCE(!(flags & PBLK_SUBMITTED_ENTRY),
			"pblk: overwriting unsubmitted data\n");

	/* Release flags on context. Protect from writes and reads */
	smp_store_release(&w_ctx->flags, PBLK_WRITABLE_ENTRY);
	pblk_ppa_set_empty(&w_ctx->ppa);
	w_ctx->lba = ADDR_EMPTY;
}

#define pblk_rb_ring_count(head, tail, size) CIRC_CNT(head, tail, size)
#define pblk_rb_ring_space(rb, head, tail, size) \
					(CIRC_SPACE(head, tail, size))

/*
 * Buffer space is calculated with respect to the back pointer signaling
 * synchronized entries to the media.
 */
static unsigned int pblk_rb_space(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return pblk_rb_ring_space(rb, mem, sync, rb->nr_entries);
}
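
/*
 * Pointer semantics, summarised for reference: entries advance through four
 * ring positions, mem (written by producers) -> subm (picked up by the write
 * thread) -> sync (persisted on the media) -> l2p_update (L2P remapped,
 * entry reusable). As an illustrative example with nr_entries = 8, mem = 6
 * and sync = 2, CIRC_CNT(6, 2, 8) = 4 entries hold unsynced data and
 * CIRC_SPACE(6, 2, 8) = 3 entries can still be written, since one slot is
 * always kept free to distinguish a full ring from an empty one.
 */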

unsigned int pblk_rb_ptr_wrap(struct pblk_rb *rb, unsigned int p,
			      unsigned int nr_entries)
{
	return (p + nr_entries) & (rb->nr_entries - 1);
}

/*
 * Buffer count is calculated with respect to the submission entry signaling
 * the entries that are available to send to the media.
 */
unsigned int pblk_rb_read_count(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int subm = READ_ONCE(rb->subm);

	return pblk_rb_ring_count(mem, subm, rb->nr_entries);
}
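
/*
 * Example of the wrap arithmetic above (illustrative numbers): because
 * nr_entries is a power of two, masking with (nr_entries - 1) replaces a
 * modulo, e.g. with nr_entries = 1024, pblk_rb_ptr_wrap(rb, 1020, 8) =
 * (1020 + 8) & 1023 = 4, so a position advanced past the end of the array
 * wraps back to its start.
 */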

unsigned int pblk_rb_sync_count(struct pblk_rb *rb)
{
	unsigned int mem = READ_ONCE(rb->mem);
	unsigned int sync = READ_ONCE(rb->sync);

	return pblk_rb_ring_count(mem, sync, rb->nr_entries);
}

unsigned int pblk_rb_read_commit(struct pblk_rb *rb, unsigned int nr_entries)
{
	unsigned int subm;

	subm = READ_ONCE(rb->subm);
	/* Commit read means updating submission pointer */
	smp_store_release(&rb->subm, pblk_rb_ptr_wrap(rb, subm, nr_entries));

	return subm;
}

static int __pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int to_update)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_line *line;
	struct pblk_rb_entry *entry;
	struct pblk_w_ctx *w_ctx;
	unsigned int user_io = 0, gc_io = 0;
	unsigned int i;
	int flags;

	for (i = 0; i < to_update; i++) {
		entry = &rb->entries[rb->l2p_update];
		w_ctx = &entry->w_ctx;

		flags = READ_ONCE(entry->w_ctx.flags);
		if (flags & PBLK_IOTYPE_USER)
			user_io++;
		else if (flags & PBLK_IOTYPE_GC)
			gc_io++;
		else
			WARN(1, "pblk: unknown IO type\n");

		pblk_update_map_dev(pblk, w_ctx->lba, w_ctx->ppa,
							entry->cacheline);

		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		atomic_dec(&line->sec_to_update);
		kref_put(&line->ref, pblk_line_put);
		clean_wctx(w_ctx);
		rb->l2p_update = pblk_rb_ptr_wrap(rb, rb->l2p_update, 1);
	}

	pblk_rl_out(&pblk->rl, user_io, gc_io);

	return 0;
}

/*
 * When we move the l2p_update pointer, we update the l2p table - lookups will
 * point to the physical address instead of to the cacheline in the write buffer
 * from this moment on.
 */
static int pblk_rb_update_l2p(struct pblk_rb *rb, unsigned int nr_entries,
			      unsigned int mem, unsigned int sync)
{
	unsigned int space, count;
	int ret = 0;

	lockdep_assert_held(&rb->w_lock);

	/* Update l2p only as buffer entries are being overwritten */
	space = pblk_rb_ring_space(rb, mem, rb->l2p_update, rb->nr_entries);
	if (space > nr_entries)
		goto out;

	count = nr_entries - space;
	/* l2p_update used exclusively under rb->w_lock */
	ret = __pblk_rb_update_l2p(rb, count);

out:
	return ret;
}
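
/*
 * Illustrative scenario for the check above (example numbers only): with
 * nr_entries = 1024, mem = 1000, l2p_update = 10 and an incoming write of 64
 * entries, CIRC_SPACE(1000, 10, 1024) = 33 is smaller than 64, so the
 * 64 - 33 = 31 oldest cached entries are remapped to their device addresses
 * and recycled before the new data is allowed to overwrite them.
 */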

/*
 * Update the l2p entry for all sectors stored on the write buffer. This means
 * that all future lookups to the l2p table will point to a device address, not
 * to the cacheline in the write buffer.
 */
void pblk_rb_sync_l2p(struct pblk_rb *rb)
{
	unsigned int sync;
	unsigned int to_update;

	spin_lock(&rb->w_lock);

	/* Protect from reads and writes */
	sync = smp_load_acquire(&rb->sync);

	to_update = pblk_rb_ring_count(sync, rb->l2p_update, rb->nr_entries);
	__pblk_rb_update_l2p(rb, to_update);

	spin_unlock(&rb->w_lock);
}

/*
 * Write @nr_entries to ring buffer from @data buffer if there is enough space.
 * Typically, 4KB data chunks coming from a bio will be copied to the ring
 * buffer, thus the write will fail if not all incoming data can be copied.
 */
static void __pblk_rb_write_entry(struct pblk_rb *rb, void *data,
				  struct pblk_w_ctx w_ctx,
				  struct pblk_rb_entry *entry)
{
	memcpy(entry->data, data, rb->seg_size);

	entry->w_ctx.lba = w_ctx.lba;
	entry->w_ctx.ppa = w_ctx.ppa;
}

void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
			      struct pblk_w_ctx w_ctx, unsigned int ring_pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	int flags;

	entry = &rb->entries[ring_pos];
	flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must guarantee that the entry is free */
	BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

	__pblk_rb_write_entry(rb, data, w_ctx, entry);

	pblk_update_map_cache(pblk, w_ctx.lba, entry->cacheline);
	flags = w_ctx.flags | PBLK_WRITTEN_DATA;

	/* Release flags on write context. Protect from writes */
	smp_store_release(&entry->w_ctx.flags, flags);
}
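
/*
 * Typical producer sequence, sketched for reference (the "bpos" position
 * name below is illustrative): the caller first reserves nr_entries
 * positions starting at bpos with pblk_rb_may_write_user(), which advances
 * rb->mem under rb->w_lock, and then copies one sector per entry:
 *
 *	for (i = 0; i < nr_entries; i++) {
 *		pos = pblk_rb_ptr_wrap(rb, bpos, i);
 *		pblk_rb_write_entry_user(rb, data + i * rb->seg_size,
 *					 w_ctx, pos);
 *	}
 *
 * The copy itself runs without the lock and publishes each entry by
 * releasing PBLK_WRITTEN_DATA in the entry flags.
 */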

void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
			    struct pblk_w_ctx w_ctx, struct pblk_line *line,
			    u64 paddr, unsigned int ring_pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	int flags;

	entry = &rb->entries[ring_pos];
	flags = READ_ONCE(entry->w_ctx.flags);
#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must guarantee that the entry is free */
	BUG_ON(!(flags & PBLK_WRITABLE_ENTRY));
#endif

	__pblk_rb_write_entry(rb, data, w_ctx, entry);

	if (!pblk_update_map_gc(pblk, w_ctx.lba, entry->cacheline, line, paddr))
		entry->w_ctx.lba = ADDR_EMPTY;

	flags = w_ctx.flags | PBLK_WRITTEN_DATA;

	/* Release flags on write context. Protect from writes */
	smp_store_release(&entry->w_ctx.flags, flags);
}

static int pblk_rb_flush_point_set(struct pblk_rb *rb, struct bio *bio,
				   unsigned int pos)
{
	struct pblk_rb_entry *entry;
	unsigned int sync, flush_point;

	pblk_rb_sync_init(rb, NULL);
	sync = READ_ONCE(rb->sync);

	if (pos == sync) {
		pblk_rb_sync_end(rb, NULL);
		return 0;
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_inc(&rb->inflight_flush_point);
#endif

	flush_point = (pos == 0) ? (rb->nr_entries - 1) : (pos - 1);
	entry = &rb->entries[flush_point];

	/* Protect flush points */
	smp_store_release(&rb->flush_point, flush_point);

	if (bio)
		bio_list_add(&entry->w_ctx.bios, bio);

	pblk_rb_sync_end(rb, NULL);

	return bio ? 1 : 0;
}

static int __pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
			       unsigned int *pos)
{
	unsigned int mem;
	unsigned int sync;
	unsigned int threshold;

	sync = READ_ONCE(rb->sync);
	mem = READ_ONCE(rb->mem);

	threshold = nr_entries + rb->back_thres;

	if (pblk_rb_ring_space(rb, mem, sync, rb->nr_entries) < threshold)
		return 0;

	if (pblk_rb_update_l2p(rb, nr_entries, mem, sync))
		return 0;

	*pos = mem;

	return 1;
}

static int pblk_rb_may_write(struct pblk_rb *rb, unsigned int nr_entries,
			     unsigned int *pos)
{
	if (!__pblk_rb_may_write(rb, nr_entries, pos))
		return 0;

	/* Protect from read count */
	smp_store_release(&rb->mem, pblk_rb_ptr_wrap(rb, *pos, nr_entries));
	return 1;
}

void pblk_rb_flush(struct pblk_rb *rb)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	unsigned int mem = READ_ONCE(rb->mem);

	if (pblk_rb_flush_point_set(rb, NULL, mem))
		return;

	pblk_write_kick(pblk);
}

static int pblk_rb_may_write_flush(struct pblk_rb *rb, unsigned int nr_entries,
				   unsigned int *pos, struct bio *bio,
				   int *io_ret)
{
	unsigned int mem;

	if (!__pblk_rb_may_write(rb, nr_entries, pos))
		return 0;

	mem = pblk_rb_ptr_wrap(rb, *pos, nr_entries);
	*io_ret = NVM_IO_DONE;

	if (bio->bi_opf & REQ_PREFLUSH) {
		struct pblk *pblk = container_of(rb, struct pblk, rwb);

		atomic64_inc(&pblk->nr_flush);
		if (pblk_rb_flush_point_set(&pblk->rwb, bio, mem))
			*io_ret = NVM_IO_OK;
	}

	/* Protect from read count */
	smp_store_release(&rb->mem, mem);

	return 1;
}

/*
 * Atomically check that (i) there is space on the write buffer for the
 * incoming I/O, and (ii) the current I/O type has enough budget in the write
 * buffer (rate-limiter).
 */
int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
			   unsigned int nr_entries, unsigned int *pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	int io_ret;

	spin_lock(&rb->w_lock);
	io_ret = pblk_rl_user_may_insert(&pblk->rl, nr_entries);
	if (io_ret) {
		spin_unlock(&rb->w_lock);
		return io_ret;
	}

	if (!pblk_rb_may_write_flush(rb, nr_entries, pos, bio, &io_ret)) {
		spin_unlock(&rb->w_lock);
		return NVM_IO_REQUEUE;
	}

	pblk_rl_user_in(&pblk->rl, nr_entries);
	spin_unlock(&rb->w_lock);

	return io_ret;
}
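
/*
 * Outcome summary, for reference: a non-zero status from the rate-limiter is
 * returned to the caller unchanged; if buffer space cannot be reserved the
 * caller gets NVM_IO_REQUEUE and is expected to retry later; otherwise *pos
 * points at the first reserved entry and the return value tells the caller
 * whether the bio was queued on a flush point (NVM_IO_OK) or can be
 * completed right away (NVM_IO_DONE).
 */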

/*
 * Look at pblk_rb_may_write_user comment
 */
int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
			 unsigned int *pos)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);

	spin_lock(&rb->w_lock);
	if (!pblk_rl_gc_may_insert(&pblk->rl, nr_entries)) {
		spin_unlock(&rb->w_lock);
		return 0;
	}

	if (!pblk_rb_may_write(rb, nr_entries, pos)) {
		spin_unlock(&rb->w_lock);
		return 0;
	}

	pblk_rl_gc_in(&pblk->rl, nr_entries);
	spin_unlock(&rb->w_lock);

	return 1;
}

/*
 * Read available entries on rb and add them to the given bio. To avoid a
 * memory copy, a page reference to the write buffer is added to the bio.
 *
 * This function is used by the write thread to form the write bio that will
 * persist data on the write buffer to the media.
 */
unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
				 unsigned int pos, unsigned int nr_entries,
				 unsigned int count)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct request_queue *q = pblk->dev->q;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;
	struct pblk_rb_entry *entry;
	struct page *page;
	unsigned int pad = 0, to_read = nr_entries;
	unsigned int i;
	int flags;

	if (count < nr_entries) {
		pad = nr_entries - count;
		to_read = count;
	}

	/* Add space for packed metadata if in use */
	pad += (pblk->min_write_pgs - pblk->min_write_pgs_data);

	c_ctx->sentry = pos;
	c_ctx->nr_valid = to_read;
	c_ctx->nr_padded = pad;

	for (i = 0; i < to_read; i++) {
		entry = &rb->entries[pos];

		/* A write has been allowed into the buffer, but data is still
		 * being copied to it. It is ok to busy wait.
		 */
try:
		flags = READ_ONCE(entry->w_ctx.flags);
		if (!(flags & PBLK_WRITTEN_DATA)) {
			io_schedule();
			goto try;
		}

		page = virt_to_page(entry->data);
		if (!page) {
			pblk_err(pblk, "could not allocate write bio page\n");
			flags &= ~PBLK_WRITTEN_DATA;
			flags |= PBLK_SUBMITTED_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&entry->w_ctx.flags, flags);
			return NVM_IO_ERR;
		}

		if (bio_add_pc_page(q, bio, page, rb->seg_size, 0) !=
								rb->seg_size) {
			pblk_err(pblk, "could not add page to write bio\n");
			flags &= ~PBLK_WRITTEN_DATA;
			flags |= PBLK_SUBMITTED_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&entry->w_ctx.flags, flags);
			return NVM_IO_ERR;
		}

		flags &= ~PBLK_WRITTEN_DATA;
		flags |= PBLK_SUBMITTED_ENTRY;

		/* Release flags on context. Protect from writes */
		smp_store_release(&entry->w_ctx.flags, flags);

		pos = pblk_rb_ptr_wrap(rb, pos, 1);
	}

	if (pad) {
		if (pblk_bio_add_pages(pblk, bio, GFP_KERNEL, pad)) {
			pblk_err(pblk, "could not pad page in write bio\n");
			return NVM_IO_ERR;
		}

		if (pad < pblk->min_write_pgs)
			atomic64_inc(&pblk->pad_dist[pad - 1]);
		else
			pblk_warn(pblk, "padding more than min. sectors\n");

		atomic64_add(pad, &pblk->pad_wa);
	}

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(pad, &pblk->padded_writes);
#endif

	return NVM_IO_OK;
}
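
/*
 * Entry flag life cycle, summarised for reference: PBLK_WRITABLE_ENTRY
 * (free slot) -> PBLK_WRITTEN_DATA (data copied in by
 * pblk_rb_write_entry_user/gc) -> PBLK_SUBMITTED_ENTRY (page referenced by a
 * write bio in pblk_rb_read_to_bio) -> back to PBLK_WRITABLE_ENTRY once
 * clean_wctx() recycles the entry after the L2P update. Flag transitions are
 * published with smp_store_release() so that the data and context written
 * before a transition are visible once the new flag value is observed.
 */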

/*
 * Copy to bio only if the lba matches the one on the given cache entry.
 * Otherwise, it means that the entry has been overwritten, and the bio should
 * be directed to disk.
 */
int pblk_rb_copy_to_bio(struct pblk_rb *rb, struct bio *bio, sector_t lba,
			struct ppa_addr ppa)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_rb_entry *entry;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr l2p_ppa;
	u64 pos = pblk_addr_to_cacheline(ppa);
	void *data;
	int flags;
	int ret = 1;

#ifdef CONFIG_NVM_PBLK_DEBUG
	/* Caller must ensure that the access will not cause an overflow */
	BUG_ON(pos >= rb->nr_entries);
#endif
	entry = &rb->entries[pos];
	w_ctx = &entry->w_ctx;
	flags = READ_ONCE(w_ctx->flags);

	spin_lock(&rb->w_lock);
	spin_lock(&pblk->trans_lock);
	l2p_ppa = pblk_trans_map_get(pblk, lba);
	spin_unlock(&pblk->trans_lock);

	/* Check if the entry has been overwritten or is scheduled to be */
	if (!pblk_ppa_comp(l2p_ppa, ppa) || w_ctx->lba != lba ||
						flags & PBLK_WRITABLE_ENTRY) {
		ret = 0;
		goto out;
	}
	data = bio_data(bio);
	memcpy(data, entry->data, rb->seg_size);

out:
	spin_unlock(&rb->w_lock);
	return ret;
}
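
/*
 * Read-path sketch, for reference (the flow itself lives in the read path,
 * not in this file): a lookup in the L2P table may return a "cache" ppa
 * produced by pblk_cacheline_to_addr() at write time. The reader then calls
 * pblk_rb_copy_to_bio() with that ppa and the original lba; a return value
 * of 1 means the cached sector was copied into the bio, while 0 means the
 * entry was overwritten or recycled in the meantime and the sector must be
 * read from the device instead.
 */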

struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos)
{
	unsigned int entry = pblk_rb_ptr_wrap(rb, pos, 0);

	return &rb->entries[entry].w_ctx;
}

unsigned int pblk_rb_sync_init(struct pblk_rb *rb, unsigned long *flags)
	__acquires(&rb->s_lock)
{
	if (flags)
		spin_lock_irqsave(&rb->s_lock, *flags);
	else
		spin_lock_irq(&rb->s_lock);

	return rb->sync;
}

void pblk_rb_sync_end(struct pblk_rb *rb, unsigned long *flags)
	__releases(&rb->s_lock)
{
	lockdep_assert_held(&rb->s_lock);

	if (flags)
		spin_unlock_irqrestore(&rb->s_lock, *flags);
	else
		spin_unlock_irq(&rb->s_lock);
}

unsigned int pblk_rb_sync_advance(struct pblk_rb *rb, unsigned int nr_entries)
{
	unsigned int sync, flush_point;

	lockdep_assert_held(&rb->s_lock);

	sync = READ_ONCE(rb->sync);
	flush_point = READ_ONCE(rb->flush_point);

	if (flush_point != EMPTY_ENTRY) {
		unsigned int secs_to_flush;

		secs_to_flush = pblk_rb_ring_count(flush_point, sync,
					rb->nr_entries);
		if (secs_to_flush < nr_entries) {
			/* Protect flush points */
			smp_store_release(&rb->flush_point, EMPTY_ENTRY);
		}
	}

	sync = pblk_rb_ptr_wrap(rb, sync, nr_entries);

	/* Protect from counts */
	smp_store_release(&rb->sync, sync);

	return sync;
}

/* Calculate how many sectors to submit up to the current flush point. */
unsigned int pblk_rb_flush_point_count(struct pblk_rb *rb)
{
	unsigned int subm, sync, flush_point;
	unsigned int submitted, to_flush;

	/* Protect flush points */
	flush_point = smp_load_acquire(&rb->flush_point);
	if (flush_point == EMPTY_ENTRY)
		return 0;

	sync = smp_load_acquire(&rb->sync);

	subm = READ_ONCE(rb->subm);
	submitted = pblk_rb_ring_count(subm, sync, rb->nr_entries);

	/* The sync point itself counts as a sector to sync */
	to_flush = pblk_rb_ring_count(flush_point, sync, rb->nr_entries) + 1;

	return (submitted < to_flush) ? (to_flush - submitted) : 0;
}
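
/*
 * Worked example (illustrative numbers): with nr_entries = 64, sync = 10,
 * subm = 20 and flush_point = 25, 10 sectors are already submitted but not
 * yet synced, and 25 - 10 + 1 = 16 sectors must be persisted before the
 * flush point is satisfied, so the write thread still has 16 - 10 = 6
 * sectors to submit for this flush.
 */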

int pblk_rb_tear_down_check(struct pblk_rb *rb)
{
	struct pblk_rb_entry *entry;
	int i;
	int ret = 0;

	spin_lock(&rb->w_lock);
	spin_lock_irq(&rb->s_lock);

	if ((rb->mem == rb->subm) && (rb->subm == rb->sync) &&
				(rb->sync == rb->l2p_update) &&
				(rb->flush_point == EMPTY_ENTRY)) {
		goto out;
	}

	if (!rb->entries) {
		ret = 1;
		goto out;
	}

	for (i = 0; i < rb->nr_entries; i++) {
		entry = &rb->entries[i];

		if (!entry->data) {
			ret = 1;
			goto out;
		}
	}

out:
	spin_unlock_irq(&rb->s_lock);
	spin_unlock(&rb->w_lock);

	return ret;
}

unsigned int pblk_rb_wrap_pos(struct pblk_rb *rb, unsigned int pos)
{
	return (pos & (rb->nr_entries - 1));
}

int pblk_rb_pos_oob(struct pblk_rb *rb, u64 pos)
{
	return (pos >= rb->nr_entries);
}

ssize_t pblk_rb_sysfs(struct pblk_rb *rb, char *buf)
{
	struct pblk *pblk = container_of(rb, struct pblk, rwb);
	struct pblk_c_ctx *c;
	ssize_t offset;
	int queued_entries = 0;

	spin_lock_irq(&rb->s_lock);
	list_for_each_entry(c, &pblk->compl_list, list)
		queued_entries++;
	spin_unlock_irq(&rb->s_lock);

	if (rb->flush_point != EMPTY_ENTRY)
		offset = scnprintf(buf, PAGE_SIZE,
			"%u\t%u\t%u\t%u\t%u\t%u\t%u - %u/%u/%u - %d\n",
			rb->nr_entries,
			rb->mem,
			rb->subm,
			rb->sync,
			rb->l2p_update,
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_read(&rb->inflight_flush_point),
#else
			0,
#endif
			rb->flush_point,
			pblk_rb_read_count(rb),
			pblk_rb_space(rb),
			pblk_rb_flush_point_count(rb),
			queued_entries);
	else
		offset = scnprintf(buf, PAGE_SIZE,
			"%u\t%u\t%u\t%u\t%u\t%u\tNULL - %u/%u/%u - %d\n",
			rb->nr_entries,
			rb->mem,
			rb->subm,
			rb->sync,
			rb->l2p_update,
#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_read(&rb->inflight_flush_point),
#else
			0,
#endif
			pblk_rb_read_count(rb),
			pblk_rb_space(rb),
			pblk_rb_flush_point_count(rb),
			queued_entries);

	return offset;
}