drivers/gpu/drm/ttm/ttm_tt.c
/**************************************************************************
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include <linux/slab.h>
#include "drm_cache.h"
#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"

static int ttm_tt_swapin(struct ttm_tt *ttm);

/**
 * Allocates storage for pointers to the pages that back the ttm.
 */
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
	ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
}

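/**
 * Frees the page directory set up by ttm_tt_alloc_page_directory() and
 * clears the pointer so a later free is harmless.
 */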
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
	drm_free_large(ttm->pages);
	ttm->pages = NULL;
}

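/**
 * Allocates a single page for the ttm, honoring the page flags:
 * pre-zeroed for TTM_PAGE_FLAG_ZERO_ALLOC, and taken from the DMA32
 * zone instead of highmem when TTM_PAGE_FLAG_DMA32 is set.
 */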
static struct page *ttm_tt_alloc_page(unsigned page_flags)
{
	gfp_t gfp_flags = GFP_USER;

	if (page_flags & TTM_PAGE_FLAG_ZERO_ALLOC)
		gfp_flags |= __GFP_ZERO;

	if (page_flags & TTM_PAGE_FLAG_DMA32)
		gfp_flags |= __GFP_DMA32;
	else
		gfp_flags |= __GFP_HIGHMEM;

	return alloc_page(gfp_flags);
}

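/**
 * Releases pages previously pinned with get_user_pages(), marking them
 * dirty first where the ttm was mapped for write and has been dirtied.
 * Occurrences of the shared dummy read page are skipped, and the
 * accounted memory is returned to the global memory accounting.
 */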
static void ttm_tt_free_user_pages(struct ttm_tt *ttm)
{
	int write;
	int dirty;
	struct page *page;
	int i;
	struct ttm_backend *be = ttm->be;

	BUG_ON(!(ttm->page_flags & TTM_PAGE_FLAG_USER));
	write = ((ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0);
	dirty = ((ttm->page_flags & TTM_PAGE_FLAG_USER_DIRTY) != 0);

	if (be)
		be->func->clear(be);

	for (i = 0; i < ttm->num_pages; ++i) {
		page = ttm->pages[i];
		if (page == NULL)
			continue;

		if (page == ttm->dummy_read_page) {
			BUG_ON(write);
			continue;
		}

		if (write && dirty && !PageReserved(page))
			set_page_dirty_lock(page);

		ttm->pages[i] = NULL;
		ttm_mem_global_free(ttm->glob->mem_glob, PAGE_SIZE);
		put_page(page);
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

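/*
 * Returns the page at @index, allocating on demand. Lowmem pages fill
 * the page array from the bottom and highmem pages from the top, so
 * the loop keeps allocating until slot @index itself is populated.
 * Each new page is charged to the global memory accounting first.
 */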
static struct page *__ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	struct page *p;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;
	int ret;

	while (NULL == (p = ttm->pages[index])) {
		p = ttm_tt_alloc_page(ttm->page_flags);

		if (!p)
			return NULL;

		ret = ttm_mem_global_alloc_page(mem_glob, p, false, false);
		if (unlikely(ret != 0))
			goto out_err;

		if (PageHighMem(p))
			ttm->pages[--ttm->first_himem_page] = p;
		else
			ttm->pages[++ttm->last_lomem_page] = p;
	}
	return p;
out_err:
	put_page(p);
	return NULL;
}

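/**
 * Returns the page at @index, swapping the ttm contents back in first
 * if they currently reside on swap storage.
 */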
struct page *ttm_tt_get_page(struct ttm_tt *ttm, int index)
{
	int ret;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return NULL;
	}
	return __ttm_tt_get_page(ttm, index);
}

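/**
 * Makes sure all pages backing the ttm are present, swapping the ttm
 * in if needed, and hands the complete page array to the backend.
 * On success the ttm is left in the tt_unbound state.
 */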
int ttm_tt_populate(struct ttm_tt *ttm)
{
	struct page *page;
	unsigned long i;
	struct ttm_backend *be;
	int ret;

	if (ttm->state != tt_unpopulated)
		return 0;

	if (unlikely(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)) {
		ret = ttm_tt_swapin(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	be = ttm->be;

	for (i = 0; i < ttm->num_pages; ++i) {
		page = __ttm_tt_get_page(ttm, i);
		if (!page)
			return -ENOMEM;
	}

	be->func->populate(be, ttm->num_pages, ttm->pages,
			   ttm->dummy_read_page);
	ttm->state = tt_unbound;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_populate);

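/*
 * On x86, the caching attributes of the kernel linear map must not
 * alias those of other mappings of the same page, so a page leaving
 * the default cached state is first transitioned through write-back
 * to release its current memtype. Highmem pages have no linear
 * mapping and need no change.
 */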
#ifdef CONFIG_X86
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	int ret = 0;

	if (PageHighMem(p))
		return 0;

	if (c_old != tt_cached) {
		/* p isn't in the default caching state, set it to
		 * writeback first to free its current memtype. */

		ret = set_pages_wb(p, 1);
		if (ret)
			return ret;
	}

	if (c_new == tt_wc)
		ret = set_memory_wc((unsigned long) page_address(p), 1);
	else if (c_new == tt_uncached)
		ret = set_pages_uc(p, 1);

	return ret;
}
#else /* CONFIG_X86 */
static inline int ttm_tt_set_page_caching(struct page *p,
					  enum ttm_caching_state c_old,
					  enum ttm_caching_state c_new)
{
	return 0;
}
#endif /* CONFIG_X86 */

/*
 * Change caching policy for the linear kernel map
 * for a range of pages in a ttm.
 */

static int ttm_tt_set_caching(struct ttm_tt *ttm,
			      enum ttm_caching_state c_state)
{
	int i, j;
	struct page *cur_page;
	int ret;

	if (ttm->caching_state == c_state)
		return 0;

	if (c_state != tt_cached) {
		ret = ttm_tt_populate(ttm);
		if (unlikely(ret != 0))
			return ret;
	}

	if (ttm->caching_state == tt_cached)
		drm_clflush_pages(ttm->pages, ttm->num_pages);

	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		if (likely(cur_page != NULL)) {
			ret = ttm_tt_set_page_caching(cur_page,
						      ttm->caching_state,
						      c_state);
			if (unlikely(ret != 0))
				goto out_err;
		}
	}

	ttm->caching_state = c_state;

	return 0;

out_err:
	for (j = 0; j < i; ++j) {
		cur_page = ttm->pages[j];
		if (likely(cur_page != NULL)) {
			(void)ttm_tt_set_page_caching(cur_page, c_state,
						      ttm->caching_state);
		}
	}

	return ret;
}

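/**
 * Translates TTM placement caching flags into a ttm_caching_state
 * (write-combined, uncached, or cached by default) and applies it.
 */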
int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement)
{
	enum ttm_caching_state state;

	if (placement & TTM_PL_FLAG_WC)
		state = tt_wc;
	else if (placement & TTM_PL_FLAG_UNCACHED)
		state = tt_uncached;
	else
		state = tt_cached;

	return ttm_tt_set_caching(ttm, state);
}
EXPORT_SYMBOL(ttm_tt_set_placement_caching);

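/*
 * Frees all pages allocated through __ttm_tt_get_page(), after first
 * restoring the default cached state so the kernel linear map stays
 * consistent. A page count other than one indicates a stray reference
 * that will outlive the ttm, which is reported as a leak.
 */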
static void ttm_tt_free_alloced_pages(struct ttm_tt *ttm)
{
	int i;
	struct page *cur_page;
	struct ttm_backend *be = ttm->be;

	if (be)
		be->func->clear(be);
	(void)ttm_tt_set_caching(ttm, tt_cached);
	for (i = 0; i < ttm->num_pages; ++i) {
		cur_page = ttm->pages[i];
		ttm->pages[i] = NULL;
		if (cur_page) {
			if (page_count(cur_page) != 1)
				printk(KERN_ERR TTM_PFX
				       "Erroneous page count. "
				       "Leaking pages.\n");
			ttm_mem_global_free_page(ttm->glob->mem_glob,
						 cur_page);
			__free_page(cur_page);
		}
	}
	ttm->state = tt_unpopulated;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
}

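/**
 * Tears down a ttm: destroys the backend, releases user-pinned or
 * driver-allocated pages together with the page directory, drops the
 * swap storage file unless it is persistent, and frees the ttm itself.
 */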
void ttm_tt_destroy(struct ttm_tt *ttm)
{
	struct ttm_backend *be;

	if (unlikely(ttm == NULL))
		return;

	be = ttm->be;
	if (likely(be != NULL)) {
		be->func->destroy(be);
		ttm->be = NULL;
	}

	if (likely(ttm->pages != NULL)) {
		if (ttm->page_flags & TTM_PAGE_FLAG_USER)
			ttm_tt_free_user_pages(ttm);
		else
			ttm_tt_free_alloced_pages(ttm);

		ttm_tt_free_page_directory(ttm);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP) &&
	    ttm->swap_storage)
		fput(ttm->swap_storage);

	kfree(ttm);
}

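/**
 * Populates a user-backed ttm by pinning @num_pages of the address
 * space of @tsk starting at @start. The memory is accounted as lowmem.
 * For a writable ttm, anything short of a full pin is a failure.
 */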
int ttm_tt_set_user(struct ttm_tt *ttm,
		    struct task_struct *tsk,
		    unsigned long start, unsigned long num_pages)
{
	struct mm_struct *mm = tsk->mm;
	int ret;
	int write = (ttm->page_flags & TTM_PAGE_FLAG_WRITE) != 0;
	struct ttm_mem_global *mem_glob = ttm->glob->mem_glob;

	BUG_ON(num_pages != ttm->num_pages);
	BUG_ON((ttm->page_flags & TTM_PAGE_FLAG_USER) == 0);

	/**
	 * Account user pages as lowmem pages for now.
	 */

	ret = ttm_mem_global_alloc(mem_glob, num_pages * PAGE_SIZE,
				   false, false);
	if (unlikely(ret != 0))
		return ret;

	down_read(&mm->mmap_sem);
	ret = get_user_pages(tsk, mm, start, num_pages,
			     write, 0, ttm->pages, NULL);
	up_read(&mm->mmap_sem);

	if (ret != num_pages && write) {
		ttm_tt_free_user_pages(ttm);
		ttm_mem_global_free(mem_glob, num_pages * PAGE_SIZE);
		return -ENOMEM;
	}

	ttm->tsk = tsk;
	ttm->start = start;
	ttm->state = tt_unbound;

	return 0;
}

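/**
 * Allocates and initializes a ttm covering @size bytes, rounded up to
 * whole pages, in the tt_unpopulated state, along with its page
 * directory and driver backend. Returns NULL on failure.
 */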
struct ttm_tt *ttm_tt_create(struct ttm_bo_device *bdev, unsigned long size,
			     uint32_t page_flags, struct page *dummy_read_page)
{
	struct ttm_bo_driver *bo_driver = bdev->driver;
	struct ttm_tt *ttm;

	if (!bo_driver)
		return NULL;

	ttm = kzalloc(sizeof(*ttm), GFP_KERNEL);
	if (!ttm)
		return NULL;

	ttm->glob = bdev->glob;
	ttm->num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	ttm->first_himem_page = ttm->num_pages;
	ttm->last_lomem_page = -1;
	ttm->caching_state = tt_cached;
	ttm->page_flags = page_flags;

	ttm->dummy_read_page = dummy_read_page;

	ttm_tt_alloc_page_directory(ttm);
	if (!ttm->pages) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed allocating page table\n");
		return NULL;
	}
	ttm->be = bo_driver->create_ttm_backend_entry(bdev);
	if (!ttm->be) {
		ttm_tt_destroy(ttm);
		printk(KERN_ERR TTM_PFX "Failed creating ttm backend entry\n");
		return NULL;
	}
	ttm->state = tt_unpopulated;
	return ttm;
}

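/**
 * Unbinds the ttm from its backend if currently bound, leaving it in
 * the tt_unbound state.
 */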
void ttm_tt_unbind(struct ttm_tt *ttm)
{
	int ret;
	struct ttm_backend *be = ttm->be;

	if (ttm->state == tt_bound) {
		ret = be->func->unbind(be);
		BUG_ON(ret);
		ttm->state = tt_unbound;
	}
}

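/**
 * Populates the ttm if necessary and binds it to the location
 * described by @bo_mem. A user-backed ttm is flagged dirty once
 * bound, since the device may write to it from then on.
 */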
int ttm_tt_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem)
{
	int ret = 0;
	struct ttm_backend *be;

	if (!ttm)
		return -EINVAL;

	if (ttm->state == tt_bound)
		return 0;

	be = ttm->be;

	ret = ttm_tt_populate(ttm);
	if (ret)
		return ret;

	ret = be->func->bind(be, bo_mem);
	if (ret) {
		printk(KERN_ERR TTM_PFX "Couldn't bind backend.\n");
		return ret;
	}

	ttm->state = tt_bound;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER)
		ttm->page_flags |= TTM_PAGE_FLAG_USER_DIRTY;
	return 0;
}
EXPORT_SYMBOL(ttm_tt_bind);

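/*
 * Brings a swapped-out ttm back in. User-backed ttms are simply
 * re-pinned from user space; otherwise every page is copied back from
 * the shmem swap storage, which is then dropped unless persistent.
 */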
static int ttm_tt_swapin(struct ttm_tt *ttm)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ret = ttm_tt_set_user(ttm, ttm->tsk, ttm->start,
				      ttm->num_pages);
		if (unlikely(ret != 0))
			return ret;

		ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;
		return 0;
	}

	swap_storage = ttm->swap_storage;
	BUG_ON(swap_storage == NULL);

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = read_mapping_page(swap_space, i, NULL);
		if (IS_ERR(from_page)) {
			ret = PTR_ERR(from_page);
			goto out_err;
		}
		to_page = __ttm_tt_get_page(ttm, i);
		if (unlikely(to_page == NULL)) {
			/* Drop the swap cache reference taken above. */
			page_cache_release(from_page);
			goto out_err;
		}

		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		page_cache_release(from_page);
	}

	if (!(ttm->page_flags & TTM_PAGE_FLAG_PERSISTANT_SWAP))
		fput(swap_storage);
	ttm->swap_storage = NULL;
	ttm->page_flags &= ~TTM_PAGE_FLAG_SWAPPED;

	return 0;
out_err:
	ttm_tt_free_alloced_pages(ttm);
	return ret;
}

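/*
 * Swaps out a cached, unbound ttm. User-backed ttms are merely
 * unpinned, since the user vmas keep the pages alive; otherwise the
 * pages are copied into @persistant_swap_storage, or into an
 * anonymous shmem file set up here when none is given, and then freed.
 */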
int ttm_tt_swapout(struct ttm_tt *ttm, struct file *persistant_swap_storage)
{
	struct address_space *swap_space;
	struct file *swap_storage;
	struct page *from_page;
	struct page *to_page;
	void *from_virtual;
	void *to_virtual;
	int i;
	int ret = -ENOMEM;

	BUG_ON(ttm->state != tt_unbound && ttm->state != tt_unpopulated);
	BUG_ON(ttm->caching_state != tt_cached);

	/*
	 * For user buffers, just unpin the pages, as there should be
	 * vma references.
	 */

	if (ttm->page_flags & TTM_PAGE_FLAG_USER) {
		ttm_tt_free_user_pages(ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
		ttm->swap_storage = NULL;
		return 0;
	}

	if (!persistant_swap_storage) {
		swap_storage = shmem_file_setup("ttm swap",
						ttm->num_pages << PAGE_SHIFT,
						0);
		if (unlikely(IS_ERR(swap_storage))) {
			printk(KERN_ERR TTM_PFX
			       "Failed allocating swap storage.\n");
			return PTR_ERR(swap_storage);
		}
	} else
		swap_storage = persistant_swap_storage;

	swap_space = swap_storage->f_path.dentry->d_inode->i_mapping;

	for (i = 0; i < ttm->num_pages; ++i) {
		from_page = ttm->pages[i];
		if (unlikely(from_page == NULL))
			continue;
		to_page = read_mapping_page(swap_space, i, NULL);
		if (unlikely(IS_ERR(to_page))) {
			ret = PTR_ERR(to_page);
			goto out_err;
		}
		preempt_disable();
		from_virtual = kmap_atomic(from_page, KM_USER0);
		to_virtual = kmap_atomic(to_page, KM_USER1);
		memcpy(to_virtual, from_virtual, PAGE_SIZE);
		kunmap_atomic(to_virtual, KM_USER1);
		kunmap_atomic(from_virtual, KM_USER0);
		preempt_enable();
		set_page_dirty(to_page);
		mark_page_accessed(to_page);
		page_cache_release(to_page);
	}

	ttm_tt_free_alloced_pages(ttm);
	ttm->swap_storage = swap_storage;
	ttm->page_flags |= TTM_PAGE_FLAG_SWAPPED;
	if (persistant_swap_storage)
		ttm->page_flags |= TTM_PAGE_FLAG_PERSISTANT_SWAP;

	return 0;
out_err:
	if (!persistant_swap_storage)
		fput(swap_storage);

	return ret;
}