/**************************************************************************
 * Copyright (c) 2007, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 **************************************************************************/
#include <drm/drmP.h>
#include "psb_drv.h"
#include "psb_reg.h"

/*
 * Code for the SGX MMU:
 */

/*
 * clflush on one processor only:
 * clflush should apparently flush the cache line on all processors in an
 * SMP system.
 */

/*
 * kmap atomic:
 * The usage of the slots must be completely encapsulated within a spinlock, and
 * no other functions that may be using the locks for other purposes may be
 * called from within the locked region.
 * Since the slots are per processor, this will guarantee that we are the only
 * user.
 */

/*
 * TODO: Inserting ptes from an interrupt handler:
 * This may be desirable for some SGX functionality where the GPU can fault in
 * needed pages. For that, we need to make an atomic insert_pages function that
 * may fail.
 * If it fails, the caller needs to insert the page using a workqueue function,
 * but on average it should be fast.
 */
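
/*
 * Layout overview (summarised from the code below, for orientation): a
 * psb_mmu_pd owns one page-directory page (pd->p) holding 1024 32-bit
 * PDEs, each of which points at a psb_mmu_pt whose page (pt->p) holds
 * 1024 32-bit PTEs. Entries that are not in use point at the dummy_pt /
 * dummy_page pages via invalid_pde / invalid_pte, so stray GPU accesses
 * land on a harmless page unless trap_pagefaults was requested, in which
 * case the invalid entries are simply zero.
 */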
struct psb_mmu_driver {
        /* protects driver- and pd structures. Always take in read mode
         * before taking the page table spinlock.
         */
        struct rw_semaphore sem;

        /* protects page tables, directory tables and pt tables. */
        spinlock_t lock;

        atomic_t needs_tlbflush;

        uint8_t __iomem *register_map;
        struct psb_mmu_pd *default_pd;
        /*uint32_t bif_ctrl;*/
        int has_clflush;
        int clflush_add;
        unsigned long clflush_mask;

        struct drm_psb_private *dev_priv;
};

struct psb_mmu_pd;

struct psb_mmu_pt {
        struct psb_mmu_pd *pd;
        uint32_t index;
        uint32_t count;
        struct page *p;
        uint32_t *v;
};

struct psb_mmu_pd {
        struct psb_mmu_driver *driver;
        int hw_context;
        struct psb_mmu_pt **tables;
        struct page *p;
        struct page *dummy_pt;
        struct page *dummy_page;
        uint32_t pd_mask;
        uint32_t invalid_pde;
        uint32_t invalid_pte;
};
static inline uint32_t psb_mmu_pt_index(uint32_t offset)
{
        return (offset >> PSB_PTE_SHIFT) & 0x3FF;
}

static inline uint32_t psb_mmu_pd_index(uint32_t offset)
{
        return offset >> PSB_PDE_SHIFT;
}
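
/*
 * Worked example (assuming the usual values PSB_PDE_SHIFT == 22,
 * PSB_PTE_SHIFT == 12 and 4k pages; the macros themselves live elsewhere
 * in the driver): for offset 0x12345678, psb_mmu_pd_index() returns 0x48
 * (bits 31..22) and psb_mmu_pt_index() returns 0x345 (bits 21..12); the
 * remaining bits 11..0 are the byte offset within the page.
 */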
static inline void psb_clflush(void *addr)
{
        __asm__ __volatile__("clflush (%0)\n" : : "r"(addr) : "memory");
}

static inline void psb_mmu_clflush(struct psb_mmu_driver *driver,
                                   void *addr)
{
        if (!driver->has_clflush)
                return;

        mb();
        psb_clflush(addr);
        mb();
}
static void psb_page_clflush(struct psb_mmu_driver *driver, struct page *page)
{
        uint32_t clflush_add = driver->clflush_add >> PAGE_SHIFT;
        uint32_t clflush_count = PAGE_SIZE / clflush_add;
        int i;
        uint8_t *clf;

        clf = kmap_atomic(page);
        mb();
        for (i = 0; i < clflush_count; ++i) {
                psb_clflush(clf);
                clf += clflush_add;
        }
        mb();
        kunmap_atomic(clf);
}
static void psb_pages_clflush(struct psb_mmu_driver *driver,
                              struct page *page[], unsigned long num_pages)
{
        int i;

        if (!driver->has_clflush)
                return;

        for (i = 0; i < num_pages; i++)
                psb_page_clflush(driver, *page++);
}
static void psb_mmu_flush_pd_locked(struct psb_mmu_driver *driver,
                                    int force)
{
        atomic_set(&driver->needs_tlbflush, 0);
}

static void psb_mmu_flush_pd(struct psb_mmu_driver *driver, int force)
{
        down_write(&driver->sem);
        psb_mmu_flush_pd_locked(driver, force);
        up_write(&driver->sem);
}
void psb_mmu_flush(struct psb_mmu_driver *driver, int rc_prot)
{
        if (rc_prot)
                down_write(&driver->sem);
        if (rc_prot)
                up_write(&driver->sem);
}
void psb_mmu_set_pd_context(struct psb_mmu_pd *pd, int hw_context)
{
        /*ttm_tt_cache_flush(&pd->p, 1);*/
        psb_pages_clflush(pd->driver, &pd->p, 1);
        down_write(&pd->driver->sem);
        wmb();
        psb_mmu_flush_pd_locked(pd->driver, 1);
        pd->hw_context = hw_context;
        up_write(&pd->driver->sem);
}
static inline unsigned long psb_pd_addr_end(unsigned long addr,
                                            unsigned long end)
{
        addr = (addr + PSB_PDE_MASK + 1) & ~PSB_PDE_MASK;
        return (addr < end) ? addr : end;
}
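
/*
 * Worked example (assuming PSB_PDE_MASK covers one page table's 4 MiB
 * span, i.e. 0x3FFFFF): psb_pd_addr_end(0x00500000, 0x02000000) returns
 * 0x00800000, the next page-table boundary, while
 * psb_pd_addr_end(0x00500000, 0x00600000) returns 0x00600000 because the
 * requested range ends first.
 */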
static inline uint32_t psb_mmu_mask_pte(uint32_t pfn, int type)
{
        uint32_t mask = PSB_PTE_VALID;

        if (type & PSB_MMU_CACHED_MEMORY)
                mask |= PSB_PTE_CACHED;
        if (type & PSB_MMU_RO_MEMORY)
                mask |= PSB_PTE_RO;
        if (type & PSB_MMU_WO_MEMORY)
                mask |= PSB_PTE_WO;

        return (pfn << PAGE_SHIFT) | mask;
}
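
/*
 * Example (the PSB_PTE_* flag values are defined elsewhere in the driver
 * and only assumed here): psb_mmu_mask_pte(0x1234, PSB_MMU_CACHED_MEMORY)
 * yields (0x1234 << PAGE_SHIFT) with PSB_PTE_VALID and PSB_PTE_CACHED
 * or'ed into the low bits, i.e. a valid, cached mapping of physical page
 * frame 0x1234.
 */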
struct psb_mmu_pd *psb_mmu_alloc_pd(struct psb_mmu_driver *driver,
                                    int trap_pagefaults, int invalid_type)
{
        struct psb_mmu_pd *pd = kmalloc(sizeof(*pd), GFP_KERNEL);
        uint32_t *v;
        int i;

        if (!pd)
                return NULL;

        pd->p = alloc_page(GFP_DMA32);
        if (!pd->p)
                goto out_err1;
        pd->dummy_pt = alloc_page(GFP_DMA32);
        if (!pd->dummy_pt)
                goto out_err2;
        pd->dummy_page = alloc_page(GFP_DMA32);
        if (!pd->dummy_page)
                goto out_err3;

        if (!trap_pagefaults) {
                pd->invalid_pde =
                    psb_mmu_mask_pte(page_to_pfn(pd->dummy_pt),
                                     invalid_type);
                pd->invalid_pte =
                    psb_mmu_mask_pte(page_to_pfn(pd->dummy_page),
                                     invalid_type);
        } else {
                pd->invalid_pde = 0;
                pd->invalid_pte = 0;
        }

        v = kmap(pd->dummy_pt);
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pte;

        kunmap(pd->dummy_pt);

        v = kmap(pd->p);
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                v[i] = pd->invalid_pde;

        kunmap(pd->p);

        clear_page(kmap(pd->dummy_page));
        kunmap(pd->dummy_page);

        pd->tables = vmalloc_user(sizeof(struct psb_mmu_pt *) * 1024);
        if (!pd->tables)
                goto out_err4;

        pd->hw_context = -1;
        pd->pd_mask = PSB_PTE_VALID;
        pd->driver = driver;

        return pd;

out_err4:
        __free_page(pd->dummy_page);
out_err3:
        __free_page(pd->dummy_pt);
out_err2:
        __free_page(pd->p);
out_err1:
        kfree(pd);
        return NULL;
}
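
/*
 * Typical lifecycle (a sketch based on how the functions in this file fit
 * together, not on a specific caller): a page directory is created with
 * psb_mmu_alloc_pd(), bound to a hardware context with
 * psb_mmu_set_pd_context(), populated with psb_mmu_insert_pages() or
 * psb_mmu_insert_pfn_sequence(), and finally torn down with
 * psb_mmu_free_pagedir().
 */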
static void psb_mmu_free_pt(struct psb_mmu_pt *pt)
{
        __free_page(pt->p);
        kfree(pt);
}

void psb_mmu_free_pagedir(struct psb_mmu_pd *pd)
{
        struct psb_mmu_driver *driver = pd->driver;
        struct psb_mmu_pt *pt;
        int i;

        down_write(&driver->sem);
        if (pd->hw_context != -1)
                psb_mmu_flush_pd_locked(driver, 1);

        /* Should take the spinlock here, but we don't need to do that
           since we have the semaphore in write mode. */

        for (i = 0; i < 1024; ++i) {
                pt = pd->tables[i];
                if (pt)
                        psb_mmu_free_pt(pt);
        }

        vfree(pd->tables);
        __free_page(pd->dummy_page);
        __free_page(pd->dummy_pt);
        __free_page(pd->p);
        kfree(pd);
        up_write(&driver->sem);
}
static struct psb_mmu_pt *psb_mmu_alloc_pt(struct psb_mmu_pd *pd)
{
        struct psb_mmu_pt *pt = kmalloc(sizeof(*pt), GFP_KERNEL);
        void *v;
        uint32_t clflush_add = pd->driver->clflush_add >> PAGE_SHIFT;
        uint32_t clflush_count = PAGE_SIZE / clflush_add;
        spinlock_t *lock = &pd->driver->lock;
        uint8_t *clf;
        uint32_t *ptes;
        int i;

        if (!pt)
                return NULL;

        pt->p = alloc_page(GFP_DMA32);
        if (!pt->p) {
                kfree(pt);
                return NULL;
        }

        spin_lock(lock);

        v = kmap_atomic(pt->p);
        clf = (uint8_t *) v;
        ptes = (uint32_t *) v;
        for (i = 0; i < (PAGE_SIZE / sizeof(uint32_t)); ++i)
                *ptes++ = pd->invalid_pte;

        if (pd->driver->has_clflush && pd->hw_context != -1) {
                mb();
                for (i = 0; i < clflush_count; ++i) {
                        psb_clflush(clf);
                        clf += clflush_add;
                }
                mb();
        }

        kunmap_atomic(v);
        spin_unlock(lock);

        pt->count = 0;
        pt->pd = pd;
        pt->index = 0;

        return pt;
}
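
/*
 * The helpers below implement the map-lock/unmap-unlock pairing used by
 * the insert/remove paths: psb_mmu_pt_alloc_map_lock() and
 * psb_mmu_pt_map_lock() return with pd->driver->lock held and the page
 * table kmapped at pt->v; psb_mmu_pt_unmap_unlock() undoes both and frees
 * the page table once its use count has dropped to zero.
 */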
static struct psb_mmu_pt *psb_mmu_pt_alloc_map_lock(struct psb_mmu_pd *pd,
                                                    unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);
        struct psb_mmu_pt *pt;
        uint32_t *v;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        while (!pt) {
                spin_unlock(lock);
                pt = psb_mmu_alloc_pt(pd);
                if (!pt)
                        return NULL;
                spin_lock(lock);

                if (pd->tables[index]) {
                        spin_unlock(lock);
                        psb_mmu_free_pt(pt);
                        spin_lock(lock);
                        pt = pd->tables[index];
                        continue;
                }

                v = kmap_atomic(pd->p);
                pd->tables[index] = pt;
                v[index] = (page_to_pfn(pt->p) << 12) | pd->pd_mask;
                pt->index = index;
                kunmap_atomic((void *) v);

                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver, (void *) &v[index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
        }
        pt->v = kmap_atomic(pt->p);
        return pt;
}
static struct psb_mmu_pt *psb_mmu_pt_map_lock(struct psb_mmu_pd *pd,
                                              unsigned long addr)
{
        uint32_t index = psb_mmu_pd_index(addr);
        struct psb_mmu_pt *pt;
        spinlock_t *lock = &pd->driver->lock;

        spin_lock(lock);
        pt = pd->tables[index];
        if (!pt) {
                spin_unlock(lock);
                return NULL;
        }
        pt->v = kmap_atomic(pt->p);
        return pt;
}
static void psb_mmu_pt_unmap_unlock(struct psb_mmu_pt *pt)
{
        struct psb_mmu_pd *pd = pt->pd;
        uint32_t *v;

        kunmap_atomic(pt->v);
        if (pt->count == 0) {
                v = kmap_atomic(pd->p);
                v[pt->index] = pd->invalid_pde;
                pd->tables[pt->index] = NULL;

                if (pd->hw_context != -1) {
                        psb_mmu_clflush(pd->driver,
                                        (void *) &v[pt->index]);
                        atomic_set(&pd->driver->needs_tlbflush, 1);
                }
                kunmap_atomic(pt->v);
                spin_unlock(&pd->driver->lock);
                psb_mmu_free_pt(pt);
                return;
        }
        spin_unlock(&pd->driver->lock);
}
static inline void psb_mmu_set_pte(struct psb_mmu_pt *pt,
                                   unsigned long addr, uint32_t pte)
{
        pt->v[psb_mmu_pt_index(addr)] = pte;
}

static inline void psb_mmu_invalidate_pte(struct psb_mmu_pt *pt,
                                          unsigned long addr)
{
        pt->v[psb_mmu_pt_index(addr)] = pt->pd->invalid_pte;
}
void psb_mmu_mirror_gtt(struct psb_mmu_pd *pd,
                        uint32_t mmu_offset, uint32_t gtt_start,
                        uint32_t gtt_pages)
{
        uint32_t *v;
        uint32_t start = psb_mmu_pd_index(mmu_offset);
        struct psb_mmu_driver *driver = pd->driver;
        int num_pages = gtt_pages;

        down_read(&driver->sem);
        spin_lock(&driver->lock);

        v = kmap_atomic(pd->p);
        v += start;

        while (gtt_pages--) {
                *v++ = gtt_start | pd->pd_mask;
                gtt_start += PAGE_SIZE;
        }

        /*ttm_tt_cache_flush(&pd->p, num_pages);*/
        psb_pages_clflush(pd->driver, &pd->p, num_pages);
        kunmap_atomic(v);
        spin_unlock(&driver->lock);

        if (pd->hw_context != -1)
                atomic_set(&pd->driver->needs_tlbflush, 1);

        up_read(&pd->driver->sem);
        psb_mmu_flush_pd(pd->driver, 0);
}
struct psb_mmu_pd *psb_mmu_get_default_pd(struct psb_mmu_driver *driver)
{
        struct psb_mmu_pd *pd;

        /* down_read(&driver->sem); */
        pd = driver->default_pd;
        /* up_read(&driver->sem); */

        return pd;
}

void psb_mmu_driver_takedown(struct psb_mmu_driver *driver)
{
        psb_mmu_free_pagedir(driver->default_pd);
        kfree(driver);
}
struct psb_mmu_driver *psb_mmu_driver_init(uint8_t __iomem *registers,
                                           int trap_pagefaults,
                                           int invalid_type,
                                           struct drm_psb_private *dev_priv)
{
        struct psb_mmu_driver *driver;

        driver = kmalloc(sizeof(*driver), GFP_KERNEL);
        if (!driver)
                return NULL;

        driver->dev_priv = dev_priv;

        driver->default_pd = psb_mmu_alloc_pd(driver, trap_pagefaults,
                                              invalid_type);
        if (!driver->default_pd)
                goto out_err1;

        spin_lock_init(&driver->lock);
        init_rwsem(&driver->sem);
        down_write(&driver->sem);
        driver->register_map = registers;
        atomic_set(&driver->needs_tlbflush, 1);

        driver->has_clflush = 0;

        if (boot_cpu_has(X86_FEATURE_CLFLSH)) {
                uint32_t tfms, misc, cap0, cap4, clflush_size;

                /*
                 * clflush size is determined at kernel setup for x86_64
                 * but not for i386. We have to do it here.
                 */

                cpuid(0x00000001, &tfms, &misc, &cap0, &cap4);
                clflush_size = ((misc >> 8) & 0xff) * 8;
                driver->has_clflush = 1;
                driver->clflush_add =
                    PAGE_SIZE * clflush_size / sizeof(uint32_t);
                driver->clflush_mask = driver->clflush_add - 1;
                driver->clflush_mask = ~driver->clflush_mask;
        }

        up_write(&driver->sem);
        return driver;

out_err1:
        kfree(driver);
        return NULL;
}
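
/*
 * Usage sketch (hypothetical caller, for illustration only; iomem_base
 * and dev_priv stand in for whatever the real caller has mapped, and the
 * trap_pagefaults/invalid_type values are illustrative):
 *
 *      struct psb_mmu_driver *mmu;
 *
 *      mmu = psb_mmu_driver_init(iomem_base, 1, 0, dev_priv);
 *      if (!mmu)
 *              return -ENOMEM;
 *      ...
 *      psb_mmu_driver_takedown(mmu);
 */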
static void psb_mmu_flush_ptes(struct psb_mmu_pd *pd,
                               unsigned long address, uint32_t num_pages,
                               uint32_t desired_tile_stride,
                               uint32_t hw_tile_stride)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long clflush_add = pd->driver->clflush_add;
        unsigned long clflush_mask = pd->driver->clflush_mask;

        if (!pd->driver->has_clflush) {
                /*ttm_tt_cache_flush(&pd->p, num_pages);*/
                psb_pages_clflush(pd->driver, &pd->p, num_pages);
                return;
        }

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;

        mb();
        for (i = 0; i < rows; ++i) {

                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_map_lock(pd, addr);
                        if (!pt)
                                continue;
                        do {
                                psb_clflush(&pt->v
                                            [psb_mmu_pt_index(addr)]);
                        } while (addr += clflush_add,
                                 (addr & clflush_mask) < next);

                        psb_mmu_pt_unmap_unlock(pt);
                } while (addr = next, next != end);

                address += row_add;
        }
        mb();
}
void psb_mmu_remove_pfn_sequence(struct psb_mmu_pd *pd,
                                 unsigned long address, uint32_t num_pages)
{
        struct psb_mmu_pt *pt;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long f_address = address;

        down_read(&pd->driver->sem);

        addr = address;
        end = addr + (num_pages << PAGE_SHIFT);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt)
                        goto out;
                do {
                        psb_mmu_invalidate_pte(pt, addr);
                        --pt->count;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);

        } while (addr = next, next != end);

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver, 0);
}
void psb_mmu_remove_pages(struct psb_mmu_pd *pd, unsigned long address,
                          uint32_t num_pages, uint32_t desired_tile_stride,
                          uint32_t hw_tile_stride)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long f_address = address;

        if (hw_tile_stride)
                rows = num_pages / desired_tile_stride;
        else
                desired_tile_stride = num_pages;

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;

        /* down_read(&pd->driver->sem); */

        /* Make sure we only need to flush this processor's cache */

        for (i = 0; i < rows; ++i) {

                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_map_lock(pd, addr);
                        if (!pt)
                                continue;
                        do {
                                psb_mmu_invalidate_pte(pt, addr);
                                --pt->count;
                        } while (addr += PAGE_SIZE, addr < next);
                        psb_mmu_pt_unmap_unlock(pt);

                } while (addr = next, next != end);

                address += row_add;
        }
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        /* up_read(&pd->driver->sem); */

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver, 0);
}
int psb_mmu_insert_pfn_sequence(struct psb_mmu_pd *pd, uint32_t start_pfn,
                                unsigned long address, uint32_t num_pages,
                                int type)
{
        struct psb_mmu_pt *pt;
        uint32_t pte;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long f_address = address;
        int ret = 0;

        down_read(&pd->driver->sem);

        addr = address;
        end = addr + (num_pages << PAGE_SHIFT);

        do {
                next = psb_pd_addr_end(addr, end);
                pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                if (!pt) {
                        ret = -ENOMEM;
                        goto out;
                }
                do {
                        pte = psb_mmu_mask_pte(start_pfn++, type);
                        psb_mmu_set_pte(pt, addr, pte);
                        pt->count++;
                } while (addr += PAGE_SIZE, addr < next);
                psb_mmu_pt_unmap_unlock(pt);

        } while (addr = next, next != end);

out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages, 1, 1);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver, 1);

        return ret;
}
int psb_mmu_insert_pages(struct psb_mmu_pd *pd, struct page **pages,
                         unsigned long address, uint32_t num_pages,
                         uint32_t desired_tile_stride,
                         uint32_t hw_tile_stride, int type)
{
        struct psb_mmu_pt *pt;
        uint32_t rows = 1;
        uint32_t i;
        uint32_t pte;
        unsigned long addr;
        unsigned long end;
        unsigned long next;
        unsigned long add;
        unsigned long row_add;
        unsigned long f_address = address;
        int ret = 0;

        if (hw_tile_stride) {
                if (num_pages % desired_tile_stride != 0)
                        return -EINVAL;
                rows = num_pages / desired_tile_stride;
        } else {
                desired_tile_stride = num_pages;
        }

        add = desired_tile_stride << PAGE_SHIFT;
        row_add = hw_tile_stride << PAGE_SHIFT;

        down_read(&pd->driver->sem);

        for (i = 0; i < rows; ++i) {

                addr = address;
                end = addr + add;

                do {
                        next = psb_pd_addr_end(addr, end);
                        pt = psb_mmu_pt_alloc_map_lock(pd, addr);
                        if (!pt) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        do {
                                pte =
                                    psb_mmu_mask_pte(page_to_pfn(*pages++),
                                                     type);
                                psb_mmu_set_pte(pt, addr, pte);
                                pt->count++;
                        } while (addr += PAGE_SIZE, addr < next);
                        psb_mmu_pt_unmap_unlock(pt);

                } while (addr = next, next != end);

                address += row_add;
        }
out:
        if (pd->hw_context != -1)
                psb_mmu_flush_ptes(pd, f_address, num_pages,
                                   desired_tile_stride, hw_tile_stride);

        up_read(&pd->driver->sem);

        if (pd->hw_context != -1)
                psb_mmu_flush(pd->driver, 1);

        return ret;
}
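
/*
 * Usage sketch (hypothetical caller, illustration only): mapping an array
 * of pages linearly and unmapping it again, with no tiling:
 *
 *      ret = psb_mmu_insert_pages(pd, pages, gpu_addr, npages,
 *                                 0, 0, PSB_MMU_CACHED_MEMORY);
 *      ...
 *      psb_mmu_remove_pages(pd, gpu_addr, npages, 0, 0);
 *
 * With hw_tile_stride == 0 both functions treat the range as a single row
 * of num_pages pages.
 */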
int psb_mmu_virtual_to_pfn(struct psb_mmu_pd *pd, uint32_t virtual,
                           unsigned long *pfn)
{
        int ret;
        struct psb_mmu_pt *pt;
        uint32_t tmp;
        spinlock_t *lock = &pd->driver->lock;

        down_read(&pd->driver->sem);
        pt = psb_mmu_pt_map_lock(pd, virtual);
        if (!pt) {
                uint32_t *v;

                spin_lock(lock);
                v = kmap_atomic(pd->p);
                tmp = v[psb_mmu_pd_index(virtual)];
                kunmap_atomic(v);
                spin_unlock(lock);

                if (tmp != pd->invalid_pde || !(tmp & PSB_PTE_VALID) ||
                    !(pd->invalid_pte & PSB_PTE_VALID)) {
                        ret = -EINVAL;
                        goto out;
                }
                ret = 0;
                *pfn = pd->invalid_pte >> PAGE_SHIFT;
                goto out;
        }
        tmp = pt->v[psb_mmu_pt_index(virtual)];
        if (!(tmp & PSB_PTE_VALID)) {
                ret = -EINVAL;
        } else {
                ret = 0;
                *pfn = tmp >> PAGE_SHIFT;
        }
        psb_mmu_pt_unmap_unlock(pt);
out:
        up_read(&pd->driver->sem);
        return ret;
}
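
/*
 * Usage sketch for psb_mmu_virtual_to_pfn() (hypothetical caller,
 * illustration only): translating a GPU virtual address back to a page
 * frame number:
 *
 *      unsigned long pfn;
 *
 *      if (psb_mmu_virtual_to_pfn(pd, gpu_addr, &pfn) == 0)
 *              use pfn << PAGE_SHIFT as the physical address;
 *
 * A negative return (-EINVAL) means no valid mapping exists for that
 * address.
 */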