/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>	/* TCE_PCI_READ/TCE_PCI_WRITE */
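
/*
 * Number of host pages needed to hold @iommu_pages TCE entries
 * (one u64 each), rounded up to whole pages.
 */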
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
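
/*
 * Total number of pages to charge against RLIMIT_MEMLOCK: the TCE pages
 * themselves plus the pages backing the table descriptor and its array
 * of page pointers.
 */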
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}
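
/*
 * Charge (@inc true) or uncharge (@inc false) @stt_pages against the
 * current process's locked_vm. Fails with -ENOMEM if the charge would
 * exceed RLIMIT_MEMLOCK and the task lacks CAP_IPC_LOCK.
 */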
static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
{
	long ret = 0;

	if (!current || !current->mm)
		return ret; /* process exited */

	down_write(&current->mm->mmap_sem);

	if (inc) {
		unsigned long locked, lock_limit;

		locked = current->mm->locked_vm + stt_pages;
		lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
			ret = -ENOMEM;
		else
			current->mm->locked_vm += stt_pages;
	} else {
		if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
			stt_pages = current->mm->locked_vm;

		current->mm->locked_vm -= stt_pages;
	}

	pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
			inc ? '+' : '-',
			stt_pages << PAGE_SHIFT,
			current->mm->locked_vm << PAGE_SHIFT,
			rlimit(RLIMIT_MEMLOCK),
			ret ? " - exceeded" : "");

	up_write(&current->mm->mmap_sem);

	return ret;
}
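
/*
 * RCU callback: frees the table's backing pages and the table itself
 * once no RCU readers can still see it on the VM's list.
 */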
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		__free_page(stt->pages[i]);

	kfree(stt);
}
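
/*
 * Fault handler for userspace mmap() of the TCE table fd: returns the
 * backing page for the faulting offset, or SIGBUS past the end of the
 * table.
 */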
static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;

	return 0;
}
static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};
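
/* mmap() support: pages are supplied lazily by the fault handler. */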
static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}
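
/*
 * Last close of the TCE table fd: unlink the table from the VM, drop
 * the KVM reference, return the locked-memory charge and free the
 * table after an RCU grace period.
 */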
static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;

	list_del_rcu(&stt->list);

	kvm_put_kvm(stt->kvm);

	kvmppc_account_memlimit(
		kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}
static const struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};
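
/*
 * Creates a guest TCE table for @args->liobn, charges it against
 * RLIMIT_MEMLOCK, links it into the VM and returns a file descriptor
 * which userspace can mmap() to access the table directly.
 */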
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	unsigned long npages, size;
	int ret = -ENOMEM;
	int i;

	if (!args->size)
		return -EINVAL;

	/* Check this LIOBN hasn't been previously allocated */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)
			return -EBUSY;
	}

	size = args->size;
	npages = kvmppc_tce_pages(size);
	ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
	if (ret) {
		stt = NULL;
		goto fail;
	}

	ret = -ENOMEM;
	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	kvm_get_kvm(kvm);

	mutex_lock(&kvm->lock);
	list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				stt, O_RDWR | O_CLOEXEC);

fail:
	if (stt) {
		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);

		kfree(stt);
	}
	return ret;
}
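
/*
 * H_PUT_TCE hypercall: validate the I/O address and TCE value, then
 * store a single TCE entry in the table identified by @liobn.
 */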
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
	long ret;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/* 	    liobn, ioba, tce); */

	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		return ret;

	kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
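
/*
 * H_PUT_TCE_INDIRECT hypercall: read up to 512 TCEs from the guest page
 * at @tce_list and store them in consecutive entries starting at @ioba,
 * validating each one first.
 */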
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces, tce;

	stt = kvmppc_find_table(vcpu, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The SPAPR spec says that the maximum size of the list is
	 * 512 TCEs, so the whole table fits in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;

		kvmppc_tce_put(stt, entry + i, tce);
	}

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
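
/*
 * H_STUFF_TCE hypercall: fill @npages consecutive TCE entries starting
 * at @ioba with the same @tce_value (typically zero, to clear mappings).
 */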
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;

	stt = kvmppc_find_table(vcpu, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison TCEs for debugging */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);