/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>

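/*
 * Resolve the page that backs a given byte offset into the framebuffer.
 * Buffers allocated with vmalloc() are looked up via vmalloc_to_page();
 * physically contiguous buffers are resolved from fix.smem_start via
 * pfn_to_page().
 */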
static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

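/*
 * Look up (and, if necessary, link into the dirty list) the pageref that
 * tracks the page at @offset. Each page of the framebuffer has exactly
 * one pageref slot in info->pagerefs, so repeated writes to the same
 * page reuse the same entry.
 */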
static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
								 unsigned long offset,
								 struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct list_head *pos = &fbdefio->pagereflist;
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct fb_deferred_io_pageref *pageref, *cur;

	if (WARN_ON_ONCE(pgoff >= info->npagerefs))
		return NULL; /* incorrect allocation size */

	/* 1:1 mapping between pageref and page offset */
	pageref = &info->pagerefs[pgoff];

	/*
	 * A non-empty list entry means the page is already being
	 * tracked in the current flush cycle, for example because a
	 * new process started writing to the same page through a new
	 * mapping; nothing must be added twice.
	 */
	if (!list_empty(&pageref->list))
		goto pageref_already_added;

	pageref->page = page;
	pageref->offset = pgoff << PAGE_SHIFT;

	if (unlikely(fbdefio->sort_pagereflist)) {
		/*
		 * We loop through the list of pagerefs before adding in
		 * order to keep the pagerefs sorted. This has significant
		 * overhead of O(n^2) with n being the number of written
		 * pages. If possible, drivers should try to work with
		 * unsorted page lists instead.
		 */
		list_for_each_entry(cur, &fbdefio->pagereflist, list) {
			if (cur->offset > pageref->offset)
				break;
		}
		pos = &cur->list;
	}

	list_add_tail(&pageref->list, pos);

pageref_already_added:
	return pageref;
}

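/*
 * Unlink a pageref from the dirty list. list_del_init() is used so that
 * a later list_empty() check in fb_deferred_io_pageref_get() sees the
 * entry as free again.
 */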
static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
				       struct fb_info *info)
{
	list_del_init(&pageref->list);
}

/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vmf->vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

	if (vmf->vma->vm_file)
		page->mapping = vmf->vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff; /* for page_mkclean() */

	vmf->page = page;
	return 0;
}

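/*
 * Flush deferred I/O on fsync(): any pending run of the delayed worker
 * is cancelled and the work is re-queued with zero delay, so dirty
 * pages are written out right away instead of after fbdefio->delay.
 */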
int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	/* Kill off the delayed work */
	cancel_delayed_work_sync(&info->deferred_work);

	/* Run it immediately */
	schedule_delayed_work(&info->deferred_work, 0);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
					    struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pageref;
	vm_fault_t ret;

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* first write in this cycle, notify the driver */
	if (fbdefio->first_io && list_empty(&fbdefio->pagereflist))
		fbdefio->first_io(info);

	pageref = fb_deferred_io_pageref_get(info, offset, page);
	if (WARN_ON_ONCE(!pageref)) {
		ret = VM_FAULT_OOM;
		goto err_mutex_unlock;
	}

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(pageref->page);

	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;

err_mutex_unlock:
	mutex_unlock(&fbdefio->lock);
	return ret;
}

/*
 * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
 * @info: The fbdev info structure
 * @vmf: The VM fault
 *
 * This is a callback we get when userspace first tries to
 * write to the page. We schedule a workqueue. That workqueue
 * will eventually mkclean the touched pages and execute the
 * deferred framebuffer IO. Then if userspace touches a page
 * again, we repeat the same scheme.
 *
 * Returns:
 * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
 */
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
	unsigned long offset = vmf->address - vmf->vma->vm_start;
	struct page *page = vmf->page;

	file_update_time(vmf->vma->vm_file);

	return fb_deferred_io_track_page(info, offset, page);
}

/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct fb_info *info = vmf->vma->vm_private_data;

	return fb_deferred_io_page_mkwrite(info, vmf);
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio	= noop_dirty_folio,
};

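/*
 * Set up a userspace mapping for deferred I/O: install the fault and
 * page_mkwrite handlers so that the first write to each page can be
 * tracked. VM_IO is only set for framebuffers in I/O memory, i.e. not
 * for FBINFO_VIRTFB devices backed by system memory.
 */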
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
	struct fb_deferred_io_pageref *pageref, *next;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
		struct page *cur = pageref->page;
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagereflist */
	fbdefio->deferred_io(info, &fbdefio->pagereflist);

	/* clear the list */
	list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
		fb_deferred_io_pageref_put(pageref, info);

	mutex_unlock(&fbdefio->lock);
}

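/*
 * Typical driver usage, as a sketch ("my_defio" and "my_deferred_io"
 * are illustrative names, not part of this API): the driver fills in
 * a struct fb_deferred_io and calls fb_deferred_io_init() before
 * registering the framebuffer.
 *
 *	static void my_deferred_io(struct fb_info *info,
 *				   struct list_head *pagereflist)
 *	{
 *		... walk pagereflist, copy dirty pages to the device ...
 *	}
 *
 *	static struct fb_deferred_io my_defio = {
 *		.delay		= HZ / 20,
 *		.deferred_io	= my_deferred_io,
 *	};
 *
 *	info->fbdefio = &my_defio;
 *	fb_deferred_io_init(info);
 */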
int fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pagerefs;
	unsigned long npagerefs, i;
	int ret;

	BUG_ON(!fbdefio);

	if (WARN_ON(!info->fix.smem_len))
		return -EINVAL;

	mutex_init(&fbdefio->lock);
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagereflist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;

	npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);

	/* alloc a page ref for each page of the display memory */
	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
	if (!pagerefs) {
		ret = -ENOMEM;
		goto err;
	}
	for (i = 0; i < npagerefs; ++i)
		INIT_LIST_HEAD(&pagerefs[i].list);
	info->npagerefs = npagerefs;
	info->pagerefs = pagerefs;

	return 0;

err:
	mutex_destroy(&fbdefio->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);

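/*
 * On open, point the file's address space at fb_deferred_io_aops so
 * that dirtying a folio is a no-op for regular writeback; deferred
 * I/O tracks dirty pages itself through the pageref list.
 */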
void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

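/*
 * Tear down deferred I/O: stop the worker and drop the page->mapping
 * pointers that were installed by the fault handler.
 */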
void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work_sync(&info->deferred_work);

	/* clear out the mapping that we setup */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	kvfree(info->pagerefs);
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);