/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */

#ifndef RRPC_H_
#define RRPC_H_

#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>

/* Run GC only if fewer than 1/X of the blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100
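
/*
 * A minimal sketch, not part of the original header, of how these two
 * constants are meant to combine: a timer that fires every GC_TIME_SECS
 * only kicks garbage collection once free blocks drop below
 * 1/GC_LIMIT_INVERSE of the total. The helper name is hypothetical; the
 * real policy lives in rrpc.c.
 */
static inline int rrpc_example_gc_needed(unsigned long nr_free,
					 unsigned long total_blocks)
{
	return nr_free < total_blocks / GC_LIMIT_INVERSE;
}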

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
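
/*
 * Example: with RRPC_EXPOSED_PAGE_SIZE == 4096 and RRPC_SECTOR == 512,
 * NR_PHY_IN_LOG == 8, i.e. one exposed 4 KiB page covers eight 512 B
 * device sectors. The helpers further down convert between the two units
 * using this factor.
 */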

struct rrpc_inflight {
	struct list_head reqs;
	spinlock_t lock;
};

struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;
	sector_t l_end;
};

struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
	struct rrpc_addr *addr;
	unsigned long flags;
};

struct rrpc_block {
	struct nvm_block *parent;
	struct rrpc_lun *rlun;
	struct list_head prio;
	struct list_head list;

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
	/* points to the next writable page within the block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	spinlock_t lock;
	atomic_t data_cmnt_size; /* data pages committed to stable storage */
};
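
/*
 * Usage sketch, inferred from the field comments above: invalidating host
 * page p in a block amounts to set_bit(p, rblk->invalid_pages) plus a
 * nr_invalid_pages increment, done under rblk->lock. With eight longs of
 * storage, the bitmap covers 8 * BITS_PER_LONG host pages per block.
 */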

struct rrpc_lun {
	struct rrpc *rrpc;
	struct nvm_lun *parent;
	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */

	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct list_head open_list;	/* In-use open blocks. These are blocks
					 * that can be both written to and
					 * read from
					 */
	struct list_head closed_list;	/* In-use closed blocks. These are
					 * blocks that can _only_ be read from
					 */

	struct work_struct ws_gc;

	spinlock_t lock;
};
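
/*
 * Block lifecycle as suggested by the lists above (assumed from the field
 * comments; the actual transitions are implemented in rrpc.c):
 *
 *	free -> open_list   (block is being written)
 *	     -> closed_list (fully written; read-only)
 *	     -> prio_list   (holds invalid pages; GC candidate)
 *	     -> free        (after GC relocates the remaining valid pages)
 */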

struct rrpc {
	/* instance must be kept at the top to resolve rrpc in unprep */
	struct nvm_tgt_instance instance;

	struct nvm_dev *dev;
	struct gendisk *disk;

	sector_t soffset; /* logical sector offset */
	u64 poffset; /* physical page offset */
	int lun_offset;

	int nr_luns;
	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_sects;
	unsigned long total_blocks;

	/* Write strategy variables. Move these into a separate structure,
	 * one per strategy.
	 */
	atomic_t next_lun; /* Whenever a page is written, this is updated
			    * to point to the next write lun
			    */

	spinlock_t bio_lock;
	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;
	spinlock_t rev_lock;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;
	mempool_t *gcb_pool;
	mempool_t *rq_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};

struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};
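
/*
 * A sketch, assuming the gcb_pool and kgc_wq fields of struct rrpc above,
 * of how a GC work item would be allocated and dispatched. The function
 * is hypothetical; the real dispatch code lives in rrpc.c.
 */
static inline void rrpc_example_queue_gc(struct rrpc *rrpc,
					 struct rrpc_block *rblk,
					 void (*fn)(struct work_struct *))
{
	struct rrpc_block_gc *gcb;

	gcb = mempool_alloc(rrpc->gcb_pool, GFP_ATOMIC);
	if (!gcb)
		return;

	gcb->rrpc = rrpc;
	gcb->rblk = rblk;
	INIT_WORK(&gcb->ws_gc, fn);
	queue_work(rrpc->kgc_wq, &gcb->ws_gc);
}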

/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};
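
/*
 * A minimal sketch, not part of the original header, of how the two maps
 * relate: trans_map is indexed by logical address and rev_trans_map by
 * physical page offset, so GC can walk a victim block's pages and find
 * the logical sectors still pointing at them. Both helper names below are
 * hypothetical.
 */
static inline struct rrpc_addr *rrpc_example_l2p(struct rrpc *rrpc,
						 sector_t laddr)
{
	return &rrpc->trans_map[laddr];	/* caller must hold the map lock */
}

static inline struct rrpc_rev_addr *rrpc_example_p2l(struct rrpc *rrpc,
						     u64 paddr)
{
	return &rrpc->rev_trans_map[paddr - rrpc->poffset];
}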

static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
							int blk_id)
{
	struct rrpc *rrpc = rlun->rrpc;
	int lun_blk = blk_id % rrpc->dev->blks_per_lun;

	return &rlun->blocks[lun_blk];
}

static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}
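
/*
 * Example: a bio starting at 512 B sector 16 with bi_size == 8192 yields
 * rrpc_get_laddr() == 2 (16 / 8) and rrpc_get_pages() == 2 (8192 / 4096);
 * rrpc_get_sector(2) converts back to sector 16.
 */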

static inline int request_intersects(struct rrpc_inflight_rq *r,
				sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}
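
/*
 * Example: with r covering laddrs [8, 15], a request for [0, 7] does not
 * intersect (7 < 8) while [12, 19] does (19 >= 8 && 12 <= 15); both
 * bounds are inclusive.
 */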

static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	WARN_ON(irqs_disabled());

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);
	return 0;
}

static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				  unsigned pages,
				  struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_sects);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}

static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
							struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}

static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
						struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}

static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_pages;

	BUG_ON((r->l_start + pages) > rrpc->nr_sects);

	rrpc_unlock_laddr(rrpc, r);
}
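
/*
 * A minimal usage sketch, not from the original source, of how the
 * inflight locking pair brackets an I/O. In the real target the unlock
 * runs from the request completion path rather than inline, and a failed
 * lock causes the bio to be requeued; this function is hypothetical.
 */
static inline int rrpc_example_rw(struct rrpc *rrpc, struct bio *bio,
				  struct nvm_rq *rqd)
{
	/* serialize against in-flight requests on the same laddr range */
	if (rrpc_lock_rq(rrpc, bio, rqd))
		return -EBUSY;	/* overlapping request in flight */

	/* ... map laddr to paddr via trans_map and submit rqd here ... */

	rrpc_unlock_rq(rrpc, rqd);
	return 0;
}

#endif /* RRPC_H_ */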