/*
 *  linux/drivers/video/fb_defio.c
 *
 *  Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>
static struct page *fb_deferred_io_get_page(struct fb_info *info, unsigned long offs)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	const void *screen_buffer = info->screen_buffer;
	struct page *page = NULL;

	if (fbdefio->get_page)
		return fbdefio->get_page(info, offs);

	if (is_vmalloc_addr(screen_buffer + offs))
		page = vmalloc_to_page(screen_buffer + offs);
	else if (info->fix.smem_start)
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	if (page)
		get_page(page);

	return page;
}
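/*
 * A hypothetical driver-side sketch (not part of this file): the
 * fbdefio->get_page hook taken at the top of fb_deferred_io_get_page()
 * lets a driver resolve the backing page itself when its screen buffer
 * is neither vmalloc'ed nor described by fix.smem_start. The name
 * example_get_page() and the physically contiguous buffer are assumptions
 * for illustration only.
 */
static struct page *example_get_page(struct fb_info *info, unsigned long offs)
{
	/* assume screen_buffer points into a physically contiguous kernel buffer */
	struct page *page = virt_to_page(info->screen_buffer + offs);

	/* take a reference, mirroring the default paths above */
	get_page(page);

	return page;
}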
static struct fb_deferred_io_pageref *fb_deferred_io_pageref_lookup(struct fb_info *info,
								     unsigned long offset,
								     struct page *page)
{
	unsigned long pgoff = offset >> PAGE_SHIFT;
	struct fb_deferred_io_pageref *pageref;

	if (fb_WARN_ON_ONCE(info, pgoff >= info->npagerefs))
		return NULL; /* incorrect allocation size */

	/* 1:1 mapping between pageref and page offset */
	pageref = &info->pagerefs[pgoff];

	if (pageref->page)
		goto out;

	pageref->page = page;
	pageref->offset = pgoff << PAGE_SHIFT;
	INIT_LIST_HEAD(&pageref->list);

out:
	if (fb_WARN_ON_ONCE(info, pageref->page != page))
		return NULL; /* inconsistent state */

	return pageref;
}
static void fb_deferred_io_pageref_clear(struct fb_deferred_io_pageref *pageref)
{
	struct page *page = pageref->page;

	if (page)
		page->mapping = NULL;
}
static struct fb_deferred_io_pageref *fb_deferred_io_pageref_get(struct fb_info *info,
								  unsigned long offset,
								  struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct list_head *pos = &fbdefio->pagereflist;
	struct fb_deferred_io_pageref *pageref, *cur;

	pageref = fb_deferred_io_pageref_lookup(info, offset, page);
	if (!pageref)
		return NULL;

	/*
	 * This check is to catch the case where a new process could start
	 * writing to the same page through a new PTE. This new access
	 * can cause a call to .page_mkwrite even if the original process'
	 * PTE is marked writable.
	 */
	if (!list_empty(&pageref->list))
		goto pageref_already_added;

	if (unlikely(fbdefio->sort_pagereflist)) {
		/*
		 * We loop through the list of pagerefs before adding in
		 * order to keep the pagerefs sorted. This has significant
		 * overhead of O(n^2) with n being the number of written
		 * pages. If possible, drivers should try to work with
		 * unsorted page lists instead.
		 */
		list_for_each_entry(cur, &fbdefio->pagereflist, list) {
			if (cur->offset > pageref->offset)
				break;
		}
		pos = &cur->list;
	}

	list_add_tail(&pageref->list, pos);

pageref_already_added:
	return pageref;
}
static void fb_deferred_io_pageref_put(struct fb_deferred_io_pageref *pageref,
				       struct fb_info *info)
{
	list_del_init(&pageref->list);
}
/* this is to find and return the vmalloc-ed fb pages */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vmf->vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_get_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	if (vmf->vma->vm_file)
		page->mapping = vmf->vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff; /* for folio_mkclean() */

	vmf->page = page;
	return 0;
}
int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);

	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	flush_delayed_work(&info->deferred_work);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);
/*
 * Adds a page to the dirty list. Call this from struct
 * vm_operations_struct.page_mkwrite.
 */
static vm_fault_t fb_deferred_io_track_page(struct fb_info *info, unsigned long offset,
					    struct page *page)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pageref;
	vm_fault_t ret;

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	pageref = fb_deferred_io_pageref_get(info, offset, page);
	if (WARN_ON_ONCE(!pageref)) {
		ret = VM_FAULT_OOM;
		goto err_mutex_unlock;
	}

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid folio_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(pageref->page);

	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;

err_mutex_unlock:
	mutex_unlock(&fbdefio->lock);
	return ret;
}
/**
 * fb_deferred_io_page_mkwrite - Mark a page as written for deferred I/O
 * @info: The fbdev info structure
 * @vmf: The VM fault
 *
 * This is a callback we get when userspace first tries to
 * write to the page. We schedule a workqueue. That workqueue
 * will eventually mkclean the touched pages and execute the
 * deferred framebuffer IO. Then if userspace touches a page
 * again, we repeat the same scheme.
 *
 * Returns:
 * VM_FAULT_LOCKED on success, or a VM_FAULT error otherwise.
 */
static vm_fault_t fb_deferred_io_page_mkwrite(struct fb_info *info, struct vm_fault *vmf)
{
	unsigned long offset = vmf->pgoff << PAGE_SHIFT;
	struct page *page = vmf->page;

	file_update_time(vmf->vma->vm_file);

	return fb_deferred_io_track_page(info, offset, page);
}
/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct fb_info *info = vmf->vma->vm_private_data;

	return fb_deferred_io_page_mkwrite(info, vmf);
}
static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

static const struct address_space_operations fb_deferred_io_aops = {
	.dirty_folio	= noop_dirty_folio,
};
int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);

	vma->vm_ops = &fb_deferred_io_vm_ops;
	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	if (!(info->flags & FBINFO_VIRTFB))
		vm_flags_set(vma, VM_IO);
	vma->vm_private_data = info;

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_mmap);
/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info, deferred_work.work);
	struct fb_deferred_io_pageref *pageref, *next;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(pageref, &fbdefio->pagereflist, list) {
		struct folio *folio = page_folio(pageref->page);

		folio_lock(folio);
		folio_mkclean(folio);
		folio_unlock(folio);
	}

	/* driver's callback with pagereflist */
	fbdefio->deferred_io(info, &fbdefio->pagereflist);

	/* clear the list */
	list_for_each_entry_safe(pageref, next, &fbdefio->pagereflist, list)
		fb_deferred_io_pageref_put(pageref, info);

	mutex_unlock(&fbdefio->lock);
}
int fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct fb_deferred_io_pageref *pagerefs;
	unsigned long npagerefs;
	int ret;

	BUG_ON(!fbdefio);

	if (WARN_ON(!info->fix.smem_len))
		return -EINVAL;

	mutex_init(&fbdefio->lock);
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagereflist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;

	npagerefs = DIV_ROUND_UP(info->fix.smem_len, PAGE_SIZE);

	/* alloc a page ref for each page of the display memory */
	pagerefs = kvcalloc(npagerefs, sizeof(*pagerefs), GFP_KERNEL);
	if (!pagerefs) {
		ret = -ENOMEM;
		goto err;
	}
	info->npagerefs = npagerefs;
	info->pagerefs = pagerefs;

	return 0;

err:
	mutex_destroy(&fbdefio->lock);
	return ret;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	file->f_mapping->a_ops = &fb_deferred_io_aops;
	fbdefio->open_count++;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);
static void fb_deferred_io_lastclose(struct fb_info *info)
{
	unsigned long i;

	flush_delayed_work(&info->deferred_work);

	/* clear out the mapping that we setup */
	for (i = 0; i < info->npagerefs; ++i)
		fb_deferred_io_pageref_clear(&info->pagerefs[i]);
}
void fb_deferred_io_release(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	if (!--fbdefio->open_count)
		fb_deferred_io_lastclose(info);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_release);
void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	fb_deferred_io_lastclose(info);

	kvfree(info->pagerefs);
	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);