/*
 * linux/drivers/video/fb_defio.c
 *
 * Copyright (C) 2006 Jaya Kumar
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file COPYING in the main directory of this archive
 * for more details.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fb.h>
#include <linux/list.h>

/* to support deferred IO */
#include <linux/rmap.h>
#include <linux/pagemap.h>
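
/*
 * Overview: deferred I/O lets a driver export an mmap()-able framebuffer
 * even when the underlying device memory cannot be mapped directly or is
 * slow to reach.  Userspace writes are trapped via page faults, the
 * touched pages are collected on a sorted list, and after a delay the
 * driver's deferred_io() callback is handed that list so it can push the
 * dirty pages to the device in one batch.
 *
 * A minimal sketch of the driver-side setup; the "mydrv" names are
 * hypothetical.  deferred_io is the required hook; delay falls back to
 * HZ when left zero (see fb_deferred_io_init() below):
 *
 *	static struct fb_deferred_io mydrv_defio = {
 *		.delay		= HZ / 4,
 *		.deferred_io	= mydrv_deferred_io,
 *	};
 */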
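
/*
 * Look up the page backing byte offset @offs: the framebuffer is either
 * vmalloc()-ed (e.g. FBINFO_VIRTFB drivers) or lives in physically
 * contiguous memory starting at fix.smem_start.
 */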
static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
{
	void *screen_base = (void __force *) info->screen_base;
	struct page *page;

	if (is_vmalloc_addr(screen_base + offs))
		page = vmalloc_to_page(screen_base + offs);
	else
		page = pfn_to_page((info->fix.smem_start + offs) >> PAGE_SHIFT);

	return page;
}

/* vm_ops->fault handler: find and return the framebuffer page backing
   the faulting offset */
static vm_fault_t fb_deferred_io_fault(struct vm_fault *vmf)
{
	unsigned long offset;
	struct page *page;
	struct fb_info *info = vmf->vma->vm_private_data;

	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= info->fix.smem_len)
		return VM_FAULT_SIGBUS;

	page = fb_deferred_io_page(info, offset);
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);

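	/*
	 * Record the file mapping so the rmap code can later find and
	 * write-protect this page's PTEs (see the page_mkclean() call
	 * in fb_deferred_io_work() below).
	 */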
	if (vmf->vma->vm_file)
		page->mapping = vmf->vma->vm_file->f_mapping;
	else
		printk(KERN_ERR "no mapping available\n");

	BUG_ON(!page->mapping);
	page->index = vmf->pgoff;

	vmf->page = page;
	return 0;
}

int fb_deferred_io_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct fb_info *info = file->private_data;
	struct inode *inode = file_inode(file);
	int err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	/* Skip if deferred io is compiled-in but disabled on this fbdev */
	if (!info->fbdefio)
		return 0;

	inode_lock(inode);
	flush_delayed_work(&info->deferred_work);
	inode_unlock(inode);

	return 0;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_fsync);

/* vm_ops->page_mkwrite handler */
static vm_fault_t fb_deferred_io_mkwrite(struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct fb_info *info = vmf->vma->vm_private_data;
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *cur;

	/* We get this callback when userspace first tries to write to a
	   page.  We schedule delayed work that will eventually mkclean
	   the touched pages and execute the deferred framebuffer I/O.
	   If userspace then touches a page again, the same cycle
	   repeats. */

	file_update_time(vmf->vma->vm_file);

	/* protect against the workqueue changing the page list */
	mutex_lock(&fbdefio->lock);

	/* first write in this cycle, notify the driver */
	if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
		fbdefio->first_io(info);

	/*
	 * We want the page to remain locked from ->page_mkwrite until
	 * the PTE is marked dirty to avoid page_mkclean() being called
	 * before the PTE is updated, which would leave the page ignored
	 * by defio.
	 * Do this by locking the page here and informing the caller
	 * about it with VM_FAULT_LOCKED.
	 */
	lock_page(page);

	/* walk the pagelist before adding, to keep it sorted by
	   page index */
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		/* this check catches the case where a second process
		   starts writing to the same page through a new PTE;
		   that access can trigger mkwrite even though the
		   original process's PTE is already marked writable */
		if (unlikely(cur == page))
			goto page_already_added;
		else if (cur->index > page->index)
			break;
	}

	list_add_tail(&page->lru, &cur->lru);

page_already_added:
	mutex_unlock(&fbdefio->lock);

	/* come back after delay to process the deferred IO */
	schedule_delayed_work(&info->deferred_work, fbdefio->delay);
	return VM_FAULT_LOCKED;
}

static const struct vm_operations_struct fb_deferred_io_vm_ops = {
	.fault		= fb_deferred_io_fault,
	.page_mkwrite	= fb_deferred_io_mkwrite,
};

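/*
 * Dirty tracking for defio happens through the mkwrite handler and the
 * pagelist above, not through normal writeback: there is no backing
 * store to write out, so this address_space op just sets the page's
 * dirty flag and skips the generic bookkeeping.
 */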
static int fb_deferred_io_set_page_dirty(struct page *page)
{
	if (!PageDirty(page))
		SetPageDirty(page);
	return 0;
}

static const struct address_space_operations fb_deferred_io_aops = {
	.set_page_dirty = fb_deferred_io_set_page_dirty,
};

int fb_deferred_io_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	vma->vm_ops = &fb_deferred_io_vm_ops;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	if (!(info->flags & FBINFO_VIRTFB))
		vma->vm_flags |= VM_IO;
	vma->vm_private_data = info;
	return 0;
}

/* workqueue callback */
static void fb_deferred_io_work(struct work_struct *work)
{
	struct fb_info *info = container_of(work, struct fb_info,
					    deferred_work.work);
	struct list_head *node, *next;
	struct page *cur;
	struct fb_deferred_io *fbdefio = info->fbdefio;

	/* here we mkclean the pages, then do all deferred IO */
	mutex_lock(&fbdefio->lock);
	list_for_each_entry(cur, &fbdefio->pagelist, lru) {
		lock_page(cur);
		page_mkclean(cur);
		unlock_page(cur);
	}

	/* driver's callback with pagelist */
	fbdefio->deferred_io(info, &fbdefio->pagelist);

	/* clear the list */
	list_for_each_safe(node, next, &fbdefio->pagelist) {
		list_del(node);
	}
	mutex_unlock(&fbdefio->lock);
}
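
/*
 * For reference, a driver's deferred_io() callback typically walks the
 * sorted pagelist it is handed above.  A hedged sketch, with a
 * hypothetical mydrv_flush_page() helper; page->index was set to the
 * page offset in fb_deferred_io_fault(), so the byte offset into the
 * framebuffer is index << PAGE_SHIFT:
 *
 *	static void mydrv_deferred_io(struct fb_info *info,
 *				      struct list_head *pagelist)
 *	{
 *		struct page *page;
 *
 *		list_for_each_entry(page, pagelist, lru)
 *			mydrv_flush_page(info, page->index << PAGE_SHIFT);
 *	}
 */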

void fb_deferred_io_init(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;

	BUG_ON(!fbdefio);
	mutex_init(&fbdefio->lock);
	INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
	INIT_LIST_HEAD(&fbdefio->pagelist);
	if (fbdefio->delay == 0) /* set a default of 1 s */
		fbdefio->delay = HZ;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_init);
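
/*
 * Sketch of the expected call order in a driver (register_framebuffer /
 * unregister_framebuffer are the usual fbdev entry points; error
 * handling omitted):
 *
 *	info->fbdefio = &mydrv_defio;
 *	fb_deferred_io_init(info);
 *	register_framebuffer(info);
 *	...
 *	unregister_framebuffer(info);
 *	fb_deferred_io_cleanup(info);
 */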

void fb_deferred_io_open(struct fb_info *info,
			 struct inode *inode,
			 struct file *file)
{
	file->f_mapping->a_ops = &fb_deferred_io_aops;
}
EXPORT_SYMBOL_GPL(fb_deferred_io_open);

void fb_deferred_io_cleanup(struct fb_info *info)
{
	struct fb_deferred_io *fbdefio = info->fbdefio;
	struct page *page;
	int i;

	BUG_ON(!fbdefio);
	cancel_delayed_work_sync(&info->deferred_work);

	/* clear out the mapping that we set up */
	for (i = 0; i < info->fix.smem_len; i += PAGE_SIZE) {
		page = fb_deferred_io_page(info, i);
		page->mapping = NULL;
	}

	mutex_destroy(&fbdefio->lock);
}
EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);