/*
 * Copyright (C) 2015 IT University of Copenhagen
 * Initial release: Matias Bjorling <m@bjorling.me>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Implementation of a Round-robin page-based Hybrid FTL for Open-channel SSDs.
 */
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/bio.h>
#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/vmalloc.h>

#include <linux/lightnvm.h>
/* Run GC only if less than 1/X of the blocks are free */
#define GC_LIMIT_INVERSE 10
#define GC_TIME_SECS 100

#define RRPC_SECTOR (512)
#define RRPC_EXPOSED_PAGE_SIZE (4096)

#define NR_PHY_IN_LOG (RRPC_EXPOSED_PAGE_SIZE / RRPC_SECTOR)
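
/*
 * Worked example (added for illustration, not in the original header): with a
 * 4096 byte exposed page and 512 byte device sectors, NR_PHY_IN_LOG is 8, so
 * logical page N is backed by device sectors N * 8 through N * 8 + 7.
 */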
struct rrpc_inflight {
	struct list_head reqs;
	spinlock_t lock;
};

struct rrpc_inflight_rq {
	struct list_head list;
	sector_t l_start;
	sector_t l_end;
};

struct rrpc_rq {
	struct rrpc_inflight_rq inflight_rq;
	struct rrpc_addr *addr;
};
struct rrpc_block {
	struct nvm_block *parent;
	struct rrpc_lun *rlun;
	struct list_head prio;

#define MAX_INVALID_PAGES_STORAGE 8
	/* Bitmap for invalid page entries */
	unsigned long invalid_pages[MAX_INVALID_PAGES_STORAGE];
	/* points to the next writable page within a block */
	unsigned int next_page;
	/* number of pages that are invalid, wrt host page size */
	unsigned int nr_invalid_pages;

	atomic_t data_cmnt_size;	/* data pages committed to stable storage */
};
struct rrpc_lun {
	struct rrpc *rrpc;
	struct nvm_lun *parent;
	struct rrpc_block *cur, *gc_cur;
	struct rrpc_block *blocks;	/* Reference to block allocation */

	struct list_head prio_list;	/* Blocks that may be GC'ed */
	struct list_head wblk_list;	/* Queued blocks to be written to */

	struct work_struct ws_gc;
};
struct rrpc {
	/* instance must be kept at the top to resolve rrpc in unprep */
	struct nvm_tgt_instance instance;

	struct nvm_dev *dev;

	sector_t soffset;	/* logical sector offset */
	u64 poffset;		/* physical page offset */

	struct rrpc_lun *luns;

	/* calculated values */
	unsigned long long nr_sects;
	unsigned long total_blocks;

	/* Write strategy variables. Move these into a per-strategy
	 * structure.
	 */
	atomic_t next_lun;	/* Whenever a page is written, this is updated
				 * to point to the next write lun
				 */

	struct bio_list requeue_bios;
	struct work_struct ws_requeue;

	/* Simple translation map of logical addresses to physical addresses.
	 * The logical addresses are known by the host system, while the
	 * physical addresses are used when writing to the disk block device.
	 */
	struct rrpc_addr *trans_map;
	/* also store a reverse map for garbage collection */
	struct rrpc_rev_addr *rev_trans_map;

	struct rrpc_inflight inflights;

	mempool_t *addr_pool;
	mempool_t *page_pool;

	struct timer_list gc_timer;
	struct workqueue_struct *krqd_wq;
	struct workqueue_struct *kgc_wq;
};
struct rrpc_block_gc {
	struct rrpc *rrpc;
	struct rrpc_block *rblk;
	struct work_struct ws_gc;
};
/* Logical to physical mapping */
struct rrpc_addr {
	u64 addr;
	struct rrpc_block *rblk;
};

/* Physical to logical mapping */
struct rrpc_rev_addr {
	u64 addr;
};
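
/*
 * Illustrative sketch (not part of the original header): how the forward and
 * reverse maps above are meant to stay in sync when a logical page is written
 * to a new physical page. The function name and the index arithmetic are
 * assumptions for the example; the real update path lives in rrpc.c.
 */
static inline void rrpc_example_update_map(struct rrpc *rrpc, sector_t laddr,
					   u64 paddr, struct rrpc_block *rblk)
{
	struct rrpc_addr *gp = &rrpc->trans_map[laddr];

	/* forward map: logical page -> physical page plus owning block */
	gp->addr = paddr;
	gp->rblk = rblk;

	/* reverse map: physical page -> logical page, consulted by GC when it
	 * moves valid data out of a victim block */
	rrpc->rev_trans_map[paddr - rrpc->poffset].addr = laddr;
}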
static inline struct rrpc_block *rrpc_get_rblk(struct rrpc_lun *rlun,
							int blk_id)
{
	struct rrpc *rrpc = rlun->rrpc;
	int lun_blk = blk_id % rrpc->dev->blks_per_lun;

	return &rlun->blocks[lun_blk];
}
static inline sector_t rrpc_get_laddr(struct bio *bio)
{
	return bio->bi_iter.bi_sector / NR_PHY_IN_LOG;
}

static inline unsigned int rrpc_get_pages(struct bio *bio)
{
	return bio->bi_iter.bi_size / RRPC_EXPOSED_PAGE_SIZE;
}

static inline sector_t rrpc_get_sector(sector_t laddr)
{
	return laddr * NR_PHY_IN_LOG;
}
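
/*
 * Illustrative sketch (not part of the original header): translating a bio
 * into the logical 4KB page range that rrpc tracks, and back to a 512B device
 * sector. The function name and the example values in the comments are
 * assumptions.
 */
static inline void rrpc_example_bio_math(struct bio *bio)
{
	/* for a bio starting at sector 24 with bi_size == 8192 ... */
	sector_t laddr = rrpc_get_laddr(bio);		/* ... logical page 3 */
	unsigned int pages = rrpc_get_pages(bio);	/* ... 2 logical pages */

	/* first backing sector of that logical page: 3 * NR_PHY_IN_LOG == 24 */
	sector_t start = rrpc_get_sector(laddr);

	(void)pages;
	(void)start;
}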
static inline int request_intersects(struct rrpc_inflight_rq *r,
				sector_t laddr_start, sector_t laddr_end)
{
	return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
			     unsigned int pages, struct rrpc_inflight_rq *r)
{
	sector_t laddr_end = laddr + pages - 1;
	struct rrpc_inflight_rq *rtmp;

	WARN_ON(irqs_disabled());

	spin_lock_irq(&rrpc->inflights.lock);
	list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
		if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
			/* existing, overlapping request, come back later */
			spin_unlock_irq(&rrpc->inflights.lock);
			return 1;
		}
	}

	r->l_start = laddr;
	r->l_end = laddr_end;

	list_add_tail(&r->list, &rrpc->inflights.reqs);
	spin_unlock_irq(&rrpc->inflights.lock);

	return 0;
}
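
/*
 * Worked example (added for illustration, not in the original header):
 * [l_start, l_end] is a closed interval, so a new request for logical pages
 * 3..4 conflicts with an in-flight request covering pages 4..7; in that case
 * __rrpc_lock_laddr() returns 1 and the caller must retry or requeue the bio.
 * Otherwise the range is queued on inflights.reqs and 0 is returned.
 */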
static inline int rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
				 unsigned int pages,
				 struct rrpc_inflight_rq *r)
{
	BUG_ON((laddr + pages) > rrpc->nr_sects);

	return __rrpc_lock_laddr(rrpc, laddr, pages, r);
}
static inline struct rrpc_inflight_rq *rrpc_get_inflight_rq(struct nvm_rq *rqd)
{
	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);

	return &rrqd->inflight_rq;
}
static inline int rrpc_lock_rq(struct rrpc *rrpc, struct bio *bio,
							struct nvm_rq *rqd)
{
	sector_t laddr = rrpc_get_laddr(bio);
	unsigned int pages = rrpc_get_pages(bio);
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);

	return rrpc_lock_laddr(rrpc, laddr, pages, r);
}
static inline void rrpc_unlock_laddr(struct rrpc *rrpc,
					struct rrpc_inflight_rq *r)
{
	unsigned long flags;

	spin_lock_irqsave(&rrpc->inflights.lock, flags);
	list_del_init(&r->list);
	spin_unlock_irqrestore(&rrpc->inflights.lock, flags);
}
static inline void rrpc_unlock_rq(struct rrpc *rrpc, struct nvm_rq *rqd)
{
	struct rrpc_inflight_rq *r = rrpc_get_inflight_rq(rqd);
	uint8_t pages = rqd->nr_ppas;

	BUG_ON((r->l_start + pages) > rrpc->nr_sects);

	rrpc_unlock_laddr(rrpc, r);
}
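
/*
 * Illustrative sketch (not part of the original header): the intended pairing
 * of rrpc_lock_rq() and rrpc_unlock_rq() around request submission. The
 * function name and the synchronous error handling are assumptions for the
 * example; in rrpc.c the range is normally unlocked from the request's
 * completion path.
 */
static inline int rrpc_example_submit(struct rrpc *rrpc, struct bio *bio,
				      struct nvm_rq *rqd)
{
	/* a nonzero return means an overlapping request is already in
	 * flight; the bio should be requeued and retried later */
	if (rrpc_lock_rq(rrpc, bio, rqd))
		return -EBUSY;

	/* ... map the bio onto physical pages and submit rqd here ... */

	rrpc_unlock_rq(rrpc, rqd);

	return 0;
}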