/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>
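
/*
 * Two emergency pools: page_pool bounces highmem pages into low memory,
 * and isa_page_pool bounces pages into ISA DMA memory (GFP_DMA, below
 * the 16MB boundary) for devices limited to 24-bit addressing.
 */
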
#define POOL_SIZE	64
#define ISA_POOL_SIZE	16

static mempool_t *page_pool, *isa_page_pool;
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
	if (max_pfn <= max_low_pfn)
		return 0;
#endif

	page_pool = mempool_create_page_pool(POOL_SIZE, 0);
	BUG_ON(!page_pool);
	printk("bounce pool size: %d pages\n", POOL_SIZE);

	return 0;
}

__initcall(init_emergency_pool);
#endif
#ifdef CONFIG_HIGHMEM
/*
 * highmem version, map in to vec
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
	unsigned long flags;
	unsigned char *vto;

	local_irq_save(flags);
	vto = kmap_atomic(to->bv_page);
	memcpy(vto + to->bv_offset, vfrom, to->bv_len);
	kunmap_atomic(vto);
	local_irq_restore(flags);
}
#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)	\
	memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */
/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
	return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}
/*
 * gets called "every" time someone inits a queue with BLK_BOUNCE_ISA
 * as the max address, so check if the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
	if (isa_page_pool)
		return 0;

	isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
				       mempool_free_pages, (void *) 0);
	BUG_ON(!isa_page_pool);

	printk("isa bounce pool size: %d pages\n", ISA_POOL_SIZE);
	return 0;
}
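
/*
 * A sketch of how this is reached (assuming the usual driver path,
 * not shown in this file): a driver that can only DMA below the ISA
 * limit calls blk_queue_bounce_limit(q, BLK_BOUNCE_ISA), which calls
 * init_emergency_isa_pool() to make sure the pool exists.
 */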
/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. kmap it
 * always, it will do the Right Thing
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
	unsigned char *vfrom;
	struct bio_vec *tovec, *fromvec;
	int i;

	bio_for_each_segment(tovec, to, i) {
		fromvec = from->bi_io_vec + i;

		/*
		 * not bounced
		 */
		if (tovec->bv_page == fromvec->bv_page)
			continue;

		/*
		 * fromvec->bv_offset and fromvec->bv_len might have been
		 * modified by the block layer, so use the original copy,
		 * bounce_copy_vec already uses tovec->bv_len
		 */
		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;

		bounce_copy_vec(tovec, vfrom);
		flush_dcache_page(tovec->bv_page);
	}
}
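
/*
 * Common completion for bounced bios: release every page that was
 * substituted from the mempool, then complete the original bio and
 * drop the clone.
 */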
static void bounce_end_io(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;
	struct bio_vec *bvec, *org_vec;
	int i;

	if (test_bit(BIO_EOPNOTSUPP, &bio->bi_flags))
		set_bit(BIO_EOPNOTSUPP, &bio_orig->bi_flags);

	/*
	 * free up bounce indirect pages used
	 */
	bio_for_each_segment_all(bvec, bio, i) {
		org_vec = bio_orig->bi_io_vec + i;
		if (bvec->bv_page == org_vec->bv_page)
			continue;

		dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
		mempool_free(bvec->bv_page, pool);
	}

	bio_endio(bio_orig, err);
	bio_put(bio);
}
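
/*
 * Write completions only need to free the bounce pages; the data has
 * already reached the device.
 */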
static void bounce_end_io_write(struct bio *bio, int err)
{
	bounce_end_io(bio, page_pool, err);
}
static void bounce_end_io_write_isa(struct bio *bio, int err)
{
	bounce_end_io(bio, isa_page_pool, err);
}
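
/*
 * Read completions must first copy the data from the bounce pages back
 * into the original (possibly highmem) pages before the pages are freed.
 */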
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool, int err)
{
	struct bio *bio_orig = bio->bi_private;

	if (test_bit(BIO_UPTODATE, &bio->bi_flags))
		copy_to_high_bio_irq(bio_orig, bio);

	bounce_end_io(bio, pool, err);
}
static void bounce_end_io_read(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, page_pool, err);
}
static void bounce_end_io_read_isa(struct bio *bio, int err)
{
	__bounce_end_io_read(bio, isa_page_pool, err);
}
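
/*
 * Some backing devices require that pages stay unchanged while under
 * writeback. When a caller has flagged such a write with
 * BIO_SNAP_STABLE, it is bounced unconditionally so the device works
 * on a private snapshot of the data.
 */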
#ifdef CONFIG_NEED_BOUNCE_POOL
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	if (bio_data_dir(bio) != WRITE)
		return 0;

	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
		return 0;

	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
}
#else
static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
{
	return 0;
}
#endif /* CONFIG_NEED_BOUNCE_POOL */
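
/*
 * Slow path: clone the bio, replace each segment that lies above the
 * queue's bounce pfn (or every segment, when bouncing is forced) with
 * a page from the mempool, copy the data over for writes, and chain
 * the original bio behind the clone via bi_private.
 */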
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
			       mempool_t *pool, int force)
{
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
	struct bio_vec *to, *from;
	unsigned i;

	if (force)
		goto bounce;
	bio_for_each_segment(from, *bio_orig, i)
		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
			goto bounce;

	return;
bounce:
	bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

	bio_for_each_segment_all(to, bio, i) {
		struct page *page = to->bv_page;

		if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
			continue;

		inc_zone_page_state(to->bv_page, NR_BOUNCE);
		to->bv_page = mempool_alloc(pool, q->bounce_gfp);

		if (rw == WRITE) {
			char *vto, *vfrom;

			flush_dcache_page(page);

			vto = page_address(to->bv_page) + to->bv_offset;
			vfrom = kmap_atomic(page) + to->bv_offset;
			memcpy(vto, vfrom, to->bv_len);
			kunmap_atomic(vfrom);
		}
	}

	trace_block_bio_bounce(q, *bio_orig);

	bio->bi_flags |= (1 << BIO_BOUNCED);

	if (pool == page_pool) {
		bio->bi_end_io = bounce_end_io_write;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read;
	} else {
		bio->bi_end_io = bounce_end_io_write_isa;
		if (rw == READ)
			bio->bi_end_io = bounce_end_io_read_isa;
	}

	bio->bi_private = *bio_orig;
	*bio_orig = bio;
}
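
/*
 * blk_queue_bounce() is the entry point: it decides whether the bio
 * needs bouncing at all, picks the appropriate pool, and only then
 * takes the slow path above.
 */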
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
	int must_bounce;
	mempool_t *pool;

	/*
	 * Data-less bio, nothing to bounce
	 */
	if (!bio_has_data(*bio_orig))
		return;

	must_bounce = must_snapshot_stable_pages(q, *bio_orig);

	/*
	 * for non-isa bounce case, just check if the bounce pfn is equal
	 * to or bigger than the highest pfn in the system -- in that case,
	 * don't waste time iterating over bio segments
	 */
	if (!(q->bounce_gfp & GFP_DMA)) {
		if (queue_bounce_pfn(q) >= blk_max_pfn && !must_bounce)
			return;
		pool = page_pool;
	} else {
		BUG_ON(!isa_page_pool);
		pool = isa_page_pool;
	}

	/*
	 * slow path
	 */
	__blk_queue_bounce(q, bio_orig, pool, must_bounce);
}

EXPORT_SYMBOL(blk_queue_bounce);
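
/*
 * A usage sketch (assuming a caller in the request submission path;
 * not taken from this file). The bio is passed by reference because
 * it may be replaced by the bounce clone:
 *
 *	struct bio *bio = ...;
 *
 *	blk_queue_bounce(q, &bio);
 *	// bio now points at the clone if bouncing happened; completing
 *	// the clone ends the original bio via bounce_end_io().
 */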