/* bounce buffer handling for block devices
 *
 * - Split from highmem.c
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/gfp.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/printk.h>
#include <asm/tlbflush.h>

#include <trace/events/block.h>

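/*
 * Number of pages reserved in the emergency pools: POOL_SIZE for the
 * highmem bounce pool, ISA_POOL_SIZE for the ISA DMA bounce pool.
 */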
#define POOL_SIZE   64
#define ISA_POOL_SIZE   16

static mempool_t *page_pool, *isa_page_pool;

#if defined(CONFIG_HIGHMEM) || defined(CONFIG_NEED_BOUNCE_POOL)
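/*
 * Set up the emergency page pool used for bouncing highmem pages. On a
 * CONFIG_HIGHMEM kernel without memory hotplug, the pool is not created
 * if the machine has no highmem pages at all.
 */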
static __init int init_emergency_pool(void)
{
#if defined(CONFIG_HIGHMEM) && !defined(CONFIG_MEMORY_HOTPLUG)
    if (max_pfn <= max_low_pfn)
        return 0;
#endif

    page_pool = mempool_create_page_pool(POOL_SIZE, 0);
    BUG_ON(!page_pool);
    pr_info("pool size: %d pages\n", POOL_SIZE);

    return 0;
}

__initcall(init_emergency_pool);
#endif

#ifdef CONFIG_HIGHMEM
/*
 * highmem version: kmap the destination vec and copy into it
 */
static void bounce_copy_vec(struct bio_vec *to, unsigned char *vfrom)
{
    unsigned long flags;
    unsigned char *vto;

    local_irq_save(flags);
    vto = kmap_atomic(to->bv_page);
    memcpy(vto + to->bv_offset, vfrom, to->bv_len);
    kunmap_atomic(vto);
    local_irq_restore(flags);
}

#else /* CONFIG_HIGHMEM */

#define bounce_copy_vec(to, vfrom)  \
    memcpy(page_address((to)->bv_page) + (to)->bv_offset, vfrom, (to)->bv_len)

#endif /* CONFIG_HIGHMEM */

/*
 * allocate pages in the DMA region for the ISA pool
 */
static void *mempool_alloc_pages_isa(gfp_t gfp_mask, void *data)
{
    return mempool_alloc_pages(gfp_mask | GFP_DMA, data);
}

/*
 * Called every time someone initializes a queue with BLK_BOUNCE_ISA
 * as the max address, so check whether the pool has already been created.
 */
int init_emergency_isa_pool(void)
{
    if (isa_page_pool)
        return 0;

    isa_page_pool = mempool_create(ISA_POOL_SIZE, mempool_alloc_pages_isa,
                       mempool_free_pages, (void *) 0);
    BUG_ON(!isa_page_pool);

    pr_info("isa pool size: %d pages\n", ISA_POOL_SIZE);
    return 0;
}

/*
 * Simple bounce buffer support for highmem pages. Depending on the
 * queue gfp mask set, *to may or may not be a highmem page. Always
 * kmap it; kmap will do the Right Thing either way.
 */
static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
{
    unsigned char *vfrom;
    struct bio_vec tovec, *fromvec = from->bi_io_vec;
    struct bvec_iter iter;

    bio_for_each_segment(tovec, to, iter) {
        if (tovec.bv_page != fromvec->bv_page) {
            /*
             * fromvec->bv_offset and fromvec->bv_len might have
             * been modified by the block layer, so use the original
             * copy; bounce_copy_vec already uses tovec->bv_len.
             */
            vfrom = page_address(fromvec->bv_page) +
                tovec.bv_offset;

            bounce_copy_vec(&tovec, vfrom);
            flush_dcache_page(tovec.bv_page);
        }

        fromvec++;
    }
}

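/*
 * Common completion path: release the bounce pages back to @pool,
 * propagate the I/O error and complete the original bio.
 */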
static void bounce_end_io(struct bio *bio, mempool_t *pool)
{
    struct bio *bio_orig = bio->bi_private;
    struct bio_vec *bvec, *org_vec;
    int i;
    int start = bio_orig->bi_iter.bi_idx;

    /*
     * free up bounce indirect pages used
     */
    bio_for_each_segment_all(bvec, bio, i) {
        org_vec = bio_orig->bi_io_vec + i + start;

        if (bvec->bv_page == org_vec->bv_page)
            continue;

        dec_zone_page_state(bvec->bv_page, NR_BOUNCE);
        mempool_free(bvec->bv_page, pool);
    }

    bio_orig->bi_error = bio->bi_error;
    bio_endio(bio_orig);
    bio_put(bio);
}

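/*
 * Write completion: the data was already copied into the bounce pages
 * at submission time, so just tear the bounce buffers down.
 */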
static void bounce_end_io_write(struct bio *bio)
{
    bounce_end_io(bio, page_pool);
}

static void bounce_end_io_write_isa(struct bio *bio)
{
    bounce_end_io(bio, isa_page_pool);
}

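/*
 * Read completion: copy the data from the bounce pages back into the
 * original bio (unless the read failed), then tear the bounce buffers down.
 */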
static void __bounce_end_io_read(struct bio *bio, mempool_t *pool)
{
    struct bio *bio_orig = bio->bi_private;

    if (!bio->bi_error)
        copy_to_high_bio_irq(bio_orig, bio);

    bounce_end_io(bio, pool);
}

static void bounce_end_io_read(struct bio *bio)
{
    __bounce_end_io_read(bio, page_pool);
}

static void bounce_end_io_read_isa(struct bio *bio)
{
    __bounce_end_io_read(bio, isa_page_pool);
}

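/*
 * Clone *bio_orig and replace every page above the queue's bounce pfn
 * with a page allocated from @pool. For writes the data is copied into
 * the bounce page here; for reads it is copied back on completion.
 * On return *bio_orig points at the bounced clone, with the original
 * bio stashed in bi_private.
 */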
static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
                   mempool_t *pool)
{
    struct bio *bio;
    int rw = bio_data_dir(*bio_orig);
    struct bio_vec *to, from;
    struct bvec_iter iter;
    unsigned i;

    bio_for_each_segment(from, *bio_orig, iter)
        if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
            goto bounce;

    return;
bounce:
    bio = bio_clone_bioset(*bio_orig, GFP_NOIO, fs_bio_set);

    bio_for_each_segment_all(to, bio, i) {
        struct page *page = to->bv_page;

        if (page_to_pfn(page) <= queue_bounce_pfn(q))
            continue;

        to->bv_page = mempool_alloc(pool, q->bounce_gfp);
        inc_zone_page_state(to->bv_page, NR_BOUNCE);

        if (rw == WRITE) {
            char *vto, *vfrom;

            flush_dcache_page(page);

            vto = page_address(to->bv_page) + to->bv_offset;
            vfrom = kmap_atomic(page) + to->bv_offset;
            memcpy(vto, vfrom, to->bv_len);
            kunmap_atomic(vfrom);
        }
    }

    trace_block_bio_bounce(q, *bio_orig);

    bio->bi_flags |= (1 << BIO_BOUNCED);

    if (pool == page_pool) {
        bio->bi_end_io = bounce_end_io_write;
        if (rw == READ)
            bio->bi_end_io = bounce_end_io_read;
    } else {
        bio->bi_end_io = bounce_end_io_write_isa;
        if (rw == READ)
            bio->bi_end_io = bounce_end_io_read_isa;
    }

    bio->bi_private = *bio_orig;
    *bio_orig = bio;
}

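/**
 * blk_queue_bounce - bounce the pages of a bio if the queue requires it
 * @q:        the request queue the bio will be submitted to
 * @bio_orig: pointer to the bio; replaced with the bounced clone when
 *            bouncing is needed
 */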
void blk_queue_bounce(struct request_queue *q, struct bio **bio_orig)
{
    mempool_t *pool;

    /*
     * Data-less bio, nothing to bounce
     */
    if (!bio_has_data(*bio_orig))
        return;

    /*
     * For the non-ISA bounce case, just check whether the bounce pfn is
     * equal to or bigger than the highest pfn in the system -- in that
     * case nothing needs bouncing, so don't waste time iterating over
     * bio segments.
     */
    if (!(q->bounce_gfp & GFP_DMA)) {
        if (queue_bounce_pfn(q) >= blk_max_pfn)
            return;
        pool = page_pool;
    } else {
        BUG_ON(!isa_page_pool);
        pool = isa_page_pool;
    }

    /*
     * slow path
     */
    __blk_queue_bounce(q, bio_orig, pool);
}
EXPORT_SYMBOL(blk_queue_bounce);