// SPDX-License-Identifier: GPL-2.0-only
/*
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
 * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/sched/signal.h>
#include <linux/hugetlb.h>
#include <linux/list.h>
#include <linux/anon_inodes.h>
#include <linux/iommu.h>
#include <linux/file.h>
#include <linux/mm.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/udbg.h>
#include <asm/iommu.h>
#include <asm/tce.h>
#include <asm/mmu_context.h>

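/*
 * Number of guest pages needed to store @iommu_pages 64-bit TCE entries
 * (one u64 per entry, rounded up to whole pages).
 */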
static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
	return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

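/*
 * Total locked-vm footprint of a table in pages: the TCE pages themselves
 * plus the kvmppc_spapr_tce_table descriptor and its page pointer array.
 */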
static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
	unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
			(tce_pages * sizeof(struct page *));

	return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
}

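/* RCU callback: drop the iommu_table reference and free the wrapper. */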
static void kvm_spapr_tce_iommu_table_free(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(head,
			struct kvmppc_spapr_tce_iommu_table, rcu);

	iommu_tce_table_put(stit->tbl);

	kfree(stit);
}

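/*
 * kref release callback: unlink the wrapper from the table list and
 * defer freeing to an RCU grace period, as lockless readers may still
 * be traversing the list.
 */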
static void kvm_spapr_tce_liobn_put(struct kref *kref)
{
	struct kvmppc_spapr_tce_iommu_table *stit = container_of(kref,
			struct kvmppc_spapr_tce_iommu_table, kref);

	list_del_rcu(&stit->next);

	call_rcu(&stit->rcu, kvm_spapr_tce_iommu_table_free);
}

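/*
 * Drop the references taken on the hardware tables of @grp by every
 * registered TCE table; called when the group is detached from the VM.
 */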
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp)
{
	int i;
	struct kvmppc_spapr_tce_table *stt;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct iommu_table_group *table_group = NULL;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {

		table_group = iommu_group_get_iommudata(grp);
		if (WARN_ON(!table_group))
			continue;

		list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
			for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
				if (table_group->tables[i] != stit->tbl)
					continue;

				kref_put(&stit->kref, kvm_spapr_tce_liobn_put);
			}
		}
		cond_resched_rcu();
	}
	rcu_read_unlock();
}

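/*
 * Attach an IOMMU group to the TCE table referenced by @tablefd: pick a
 * hardware table whose geometry matches the guest-visible window and
 * remember it so the H_PUT_TCE family of hypercalls can update it directly.
 */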
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	bool found = false;
	struct iommu_table *tbl = NULL;
	struct iommu_table_group *table_group;
	long i;
	struct kvmppc_spapr_tce_iommu_table *stit;
	struct fd f;

	f = fdget(tablefd);
	if (!f.file)
		return -EBADF;

	rcu_read_lock();
	list_for_each_entry_rcu(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt == f.file->private_data) {
			found = true;
			break;
		}
	}
	rcu_read_unlock();

	fdput(f);

	if (!found)
		return -EINVAL;

	table_group = iommu_group_get_iommudata(grp);
	if (WARN_ON(!table_group))
		return -EFAULT;

	for (i = 0; i < IOMMU_TABLE_GROUP_MAX_TABLES; ++i) {
		struct iommu_table *tbltmp = table_group->tables[i];

		if (!tbltmp)
			continue;
		/* Make sure hardware table parameters are compatible */
		if ((tbltmp->it_page_shift <= stt->page_shift) &&
				(tbltmp->it_offset << tbltmp->it_page_shift ==
				 stt->offset << stt->page_shift) &&
				(tbltmp->it_size << tbltmp->it_page_shift >=
				 stt->size << stt->page_shift)) {
			/*
			 * Reference the table to avoid races with
			 * add/remove DMA windows.
			 */
			tbl = iommu_tce_table_get(tbltmp);
			break;
		}
	}
	if (!tbl)
		return -EINVAL;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		if (tbl != stit->tbl)
			continue;

		if (!kref_get_unless_zero(&stit->kref)) {
			/* stit is being destroyed */
			iommu_tce_table_put(tbl);
			rcu_read_unlock();
			return -ENOTTY;
		}
		/*
		 * The table is already known to this KVM, we just increased
		 * its KVM reference counter and can return.
		 */
		rcu_read_unlock();
		return 0;
	}
	rcu_read_unlock();

	stit = kzalloc(sizeof(*stit), GFP_KERNEL);
	if (!stit) {
		iommu_tce_table_put(tbl);
		return -ENOMEM;
	}

	stit->tbl = tbl;
	kref_init(&stit->kref);

	list_add_rcu(&stit->next, &stt->iommu_tables);

	return 0;
}

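/* RCU callback: free the pages backing the guest view and the table itself. */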
static void release_spapr_tce_table(struct rcu_head *head)
{
	struct kvmppc_spapr_tce_table *stt = container_of(head,
			struct kvmppc_spapr_tce_table, rcu);
	unsigned long i, npages = kvmppc_tce_pages(stt->size);

	for (i = 0; i < npages; i++)
		if (stt->pages[i])
			__free_page(stt->pages[i]);

	kfree(stt);
}

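/*
 * Return the page backing TCE page @sttpage, allocating it on first use.
 * The recheck under alloc_lock ensures concurrent callers settle on a
 * single page.
 */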
static struct page *kvm_spapr_get_tce_page(struct kvmppc_spapr_tce_table *stt,
		unsigned long sttpage)
{
	struct page *page = stt->pages[sttpage];

	if (page)
		return page;

	mutex_lock(&stt->alloc_lock);
	page = stt->pages[sttpage];
	if (!page) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO);
		WARN_ON_ONCE(!page);
		if (page)
			stt->pages[sttpage] = page;
	}
	mutex_unlock(&stt->alloc_lock);

	return page;
}

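/*
 * Fault handler for userspace mmap() of the table fd: hand out the
 * on-demand allocated page backing the faulting offset.
 */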
static vm_fault_t kvm_spapr_tce_fault(struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vmf->vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
		return VM_FAULT_SIGBUS;

	page = kvm_spapr_get_tce_page(stt, vmf->pgoff);
	if (!page)
		return VM_FAULT_OOM;

	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;
	struct kvmppc_spapr_tce_iommu_table *stit, *tmp;
	struct kvm *kvm = stt->kvm;

	mutex_lock(&kvm->lock);
	list_del_rcu(&stt->list);
	mutex_unlock(&kvm->lock);

	list_for_each_entry_safe(stit, tmp, &stt->iommu_tables, next) {
		WARN_ON(!kref_read(&stit->kref));
		while (1) {
			if (kref_put(&stit->kref, kvm_spapr_tce_liobn_put))
				break;
		}
	}

	account_locked_vm(kvm->mm,
			kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);

	kvm_put_kvm(stt->kvm);

	call_rcu(&stt->rcu, release_spapr_tce_table);

	return 0;
}

static const struct file_operations kvm_spapr_tce_fops = {
	.mmap		= kvm_spapr_tce_mmap,
	.release	= kvm_spapr_tce_release,
};

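/*
 * Handler for KVM_CREATE_SPAPR_TCE_64: validate the window geometry,
 * account the locked memory, allocate the table and return it to
 * userspace as an anonymous file descriptor.
 */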
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce_64 *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	struct kvmppc_spapr_tce_table *siter;
	struct mm_struct *mm = kvm->mm;
	unsigned long npages, size = args->size;
	int ret;

	if (!args->size || args->page_shift < 12 || args->page_shift > 34 ||
			(args->offset + args->size > (ULLONG_MAX >> args->page_shift)))
		return -EINVAL;

	npages = kvmppc_tce_pages(size);
	ret = account_locked_vm(mm, kvmppc_stt_pages(npages), true);
	if (ret)
		return ret;

	ret = -ENOMEM;
	stt = kzalloc(struct_size(stt, pages, npages), GFP_KERNEL);
	if (!stt)
		goto fail_acct;

	stt->liobn = args->liobn;
	stt->page_shift = args->page_shift;
	stt->offset = args->offset;
	stt->size = size;
	stt->kvm = kvm;
	mutex_init(&stt->alloc_lock);
	INIT_LIST_HEAD_RCU(&stt->iommu_tables);

	mutex_lock(&kvm->lock);

	/* Check this LIOBN hasn't been previously allocated */
	ret = 0;
	list_for_each_entry(siter, &kvm->arch.spapr_tce_tables, list) {
		if (siter->liobn == args->liobn) {
			ret = -EBUSY;
			break;
		}
	}

	kvm_get_kvm(kvm);
	if (!ret)
		ret = anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				       stt, O_RDWR | O_CLOEXEC);

	if (ret >= 0)
		list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
	else
		kvm_put_kvm_no_destroy(kvm);

	mutex_unlock(&kvm->lock);

	if (ret >= 0)
		return ret;

	kfree(stt);
fail_acct:
	account_locked_vm(mm, kvmppc_stt_pages(npages), false);
	return ret;
}

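/*
 * Convert the guest physical address carried by a TCE into the userspace
 * virtual address mapping it, preserving the offset within the page.
 */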
static long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua)
{
	unsigned long gfn = tce >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	memslot = __gfn_to_memslot(kvm_memslots(kvm), gfn);
	if (!memslot)
		return -EINVAL;

	*ua = __gfn_to_hva_memslot(memslot, gfn) |
		(tce & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));

	return 0;
}

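/*
 * Validate a guest-supplied TCE: check the address against the table's
 * page size and, for every attached hardware table, verify the target
 * memory is preregistered so it can be mapped later without faulting.
 */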
static long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long tce)
{
	unsigned long gpa = tce & ~(TCE_PCI_READ | TCE_PCI_WRITE);
	enum dma_data_direction dir = iommu_tce_direction(tce);
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long ua = 0;

	/* Allow userspace to poison TCE table */
	if (dir == DMA_NONE)
		return H_SUCCESS;

	if (iommu_tce_check_gpa(stt->page_shift, gpa))
		return H_TOO_HARD;

	if (kvmppc_tce_to_ua(stt->kvm, tce, &ua))
		return H_TOO_HARD;

	rcu_read_lock();
	list_for_each_entry_rcu(stit, &stt->iommu_tables, next) {
		unsigned long hpa = 0;
		struct mm_iommu_table_group_mem_t *mem;
		long shift = stit->tbl->it_page_shift;

		mem = mm_iommu_lookup(stt->kvm->mm, ua, 1ULL << shift);
		if (!mem || mm_iommu_ua_to_hpa(mem, ua, shift, &hpa)) {
			rcu_read_unlock();
			return H_TOO_HARD;
		}
	}
	rcu_read_unlock();

	return H_SUCCESS;
}

/*
 * Handles TCE requests for emulated devices.
 * Puts guest TCE values to the table and expects user space to convert them.
 * This cannot fail, so kvmppc_tce_validate() must be called beforehand.
 */
static void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
		unsigned long idx, unsigned long tce)
{
	struct page *page;
	u64 *tbl;
	unsigned long sttpage;

	idx -= stt->offset;
	sttpage = idx / TCES_PER_PAGE;
	page = stt->pages[sttpage];

	if (!page) {
		/* We allow any TCE, not just with read|write permissions */
		if (!tce)
			return;

		page = kvm_spapr_get_tce_page(stt, sttpage);
		if (!page)
			return;
	}
	tbl = page_to_virt(page);

	tbl[idx % TCES_PER_PAGE] = tce;
}

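/*
 * Reset a hardware TCE entry to empty (DMA_NONE); used to back out of a
 * partially failed update.
 */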
static void kvmppc_clear_tce(struct mm_struct *mm, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long hpa = 0;
	enum dma_data_direction dir = DMA_NONE;

	iommu_tce_xchg_no_kill(mm, tbl, entry, &hpa, &dir);
}

static long kvmppc_tce_iommu_mapped_dec(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	struct mm_iommu_table_group_mem_t *mem = NULL;
	const unsigned long pgsize = 1ULL << tbl->it_page_shift;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY_RO(tbl, entry);

	if (!pua)
		return H_SUCCESS;

	mem = mm_iommu_lookup(kvm->mm, be64_to_cpu(*pua), pgsize);
	if (!mem)
		return H_TOO_HARD;

	mm_iommu_mapped_dec(mem);

	*pua = cpu_to_be64(0);

	return H_SUCCESS;
}

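/*
 * Clear one hardware TCE: exchange the entry for an empty one and, if it
 * was mapped, drop the pinned-memory reference taken when it was set.
 */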
static long kvmppc_tce_iommu_do_unmap(struct kvm *kvm,
		struct iommu_table *tbl, unsigned long entry)
{
	enum dma_data_direction dir = DMA_NONE;
	unsigned long hpa = 0;
	long ret;

	if (WARN_ON_ONCE(iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa,
					&dir)))
		return H_TOO_HARD;

	if (dir == DMA_NONE)
		return H_SUCCESS;

	ret = kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);
	if (ret != H_SUCCESS)
		iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);

	return ret;
}

static long kvmppc_tce_iommu_unmap(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry)
{
	unsigned long i, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0; i < subpages; ++i) {
		ret = kvmppc_tce_iommu_do_unmap(kvm, tbl, io_entry + i);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

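/*
 * Install one hardware TCE: translate @ua to a host physical address via
 * the preregistered memory descriptors, take a mapped reference, program
 * the entry and record @ua so the reference can be dropped on unmap.
 */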
static long kvmppc_tce_iommu_do_map(struct kvm *kvm, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	long ret;
	unsigned long hpa;
	__be64 *pua = IOMMU_TABLE_USERSPACE_ENTRY(tbl, entry);
	struct mm_iommu_table_group_mem_t *mem;

	if (!pua)
		/* it_userspace allocation might be delayed */
		return H_TOO_HARD;

	mem = mm_iommu_lookup(kvm->mm, ua, 1ULL << tbl->it_page_shift);
	if (!mem)
		/* This only handles v2 IOMMU type, v1 is handled via ioctl() */
		return H_TOO_HARD;

	if (WARN_ON_ONCE(mm_iommu_ua_to_hpa(mem, ua, tbl->it_page_shift, &hpa)))
		return H_TOO_HARD;

	if (mm_iommu_mapped_inc(mem))
		return H_TOO_HARD;

	ret = iommu_tce_xchg_no_kill(kvm->mm, tbl, entry, &hpa, &dir);
	if (WARN_ON_ONCE(ret)) {
		mm_iommu_mapped_dec(mem);
		return H_TOO_HARD;
	}

	if (dir != DMA_NONE)
		kvmppc_tce_iommu_mapped_dec(kvm, tbl, entry);

	*pua = cpu_to_be64(ua);

	return 0;
}

static long kvmppc_tce_iommu_map(struct kvm *kvm,
		struct kvmppc_spapr_tce_table *stt, struct iommu_table *tbl,
		unsigned long entry, unsigned long ua,
		enum dma_data_direction dir)
{
	unsigned long i, pgoff, ret = H_SUCCESS;
	unsigned long subpages = 1ULL << (stt->page_shift - tbl->it_page_shift);
	unsigned long io_entry = entry * subpages;

	for (i = 0, pgoff = 0; i < subpages;
			++i, pgoff += IOMMU_PAGE_SIZE(tbl)) {

		ret = kvmppc_tce_iommu_do_map(kvm, tbl,
				io_entry + i, ua + pgoff, dir);
		if (ret != H_SUCCESS)
			break;
	}

	return ret;
}

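/*
 * Handler for the H_PUT_TCE hypercall: validate the request, update every
 * attached hardware table, then the emulated table seen by userspace.
 */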
long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
		      unsigned long ioba, unsigned long tce)
{
	struct kvmppc_spapr_tce_table *stt;
	long ret, idx;
	struct kvmppc_spapr_tce_iommu_table *stit;
	unsigned long entry, ua = 0;
	enum dma_data_direction dir;

	/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
	/*	liobn, ioba, tce); */

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, 1);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvmppc_tce_validate(stt, tce);
	if (ret != H_SUCCESS)
		goto unlock_exit;

	dir = iommu_tce_direction(tce);

	if ((dir != DMA_NONE) && kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
		ret = H_PARAMETER;
		goto unlock_exit;
	}

	entry = ioba >> stt->page_shift;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		if (dir == DMA_NONE)
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry);
		else
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt, stit->tbl,
					entry, ua, dir);

		iommu_tce_kill(stit->tbl, entry, 1);

		if (ret != H_SUCCESS) {
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry);
			goto unlock_exit;
		}
	}

	kvmppc_tce_put(stt, entry, tce);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);

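/*
 * Handler for the H_PUT_TCE_INDIRECT hypercall: fetch a list of up to
 * 512 TCEs from guest memory, validate them all first, then apply each
 * to the hardware and emulated tables.
 */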
long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret = H_SUCCESS, idx;
	unsigned long entry, ua = 0;
	u64 __user *tces;
	u64 tce;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	entry = ioba >> stt->page_shift;
	/*
	 * The SPAPR spec says the maximum size of the list is 512 TCEs,
	 * so the whole list fits in a 4K page.
	 */
	if (npages > 512)
		return H_PARAMETER;

	if (tce_list & (SZ_4K - 1))
		return H_PARAMETER;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	idx = srcu_read_lock(&vcpu->kvm->srcu);
	if (kvmppc_tce_to_ua(vcpu->kvm, tce_list, &ua)) {
		ret = H_TOO_HARD;
		goto unlock_exit;
	}
	tces = (u64 __user *) ua;

	for (i = 0; i < npages; ++i) {
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto unlock_exit;
		}
		tce = be64_to_cpu(tce);

		ret = kvmppc_tce_validate(stt, tce);
		if (ret != H_SUCCESS)
			goto unlock_exit;
	}

	for (i = 0; i < npages; ++i) {
		/*
		 * This looks unsafe, because we validate, then regrab
		 * the TCE from userspace which could have been changed by
		 * another thread.
		 *
		 * But it actually is safe, because the relevant checks will be
		 * re-executed in the following code. If userspace tries to
		 * change this dodgily it will result in a messier failure mode
		 * but won't threaten the host.
		 */
		if (get_user(tce, tces + i)) {
			ret = H_TOO_HARD;
			goto invalidate_exit;
		}
		tce = be64_to_cpu(tce);

		if (kvmppc_tce_to_ua(vcpu->kvm, tce, &ua)) {
			ret = H_PARAMETER;
			goto invalidate_exit;
		}

		list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
			ret = kvmppc_tce_iommu_map(vcpu->kvm, stt,
					stit->tbl, entry + i, ua,
					iommu_tce_direction(tce));

			if (ret != H_SUCCESS) {
				kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl,
						entry + i);
				goto invalidate_exit;
			}
		}

		kvmppc_tce_put(stt, entry + i, tce);
	}

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill(stit->tbl, entry, npages);

unlock_exit:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);

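/*
 * Handler for the H_STUFF_TCE hypercall: write the same TCE value (which
 * must carry no permission bits) to @npages consecutive entries, clearing
 * the corresponding hardware mappings.
 */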
long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages)
{
	struct kvmppc_spapr_tce_table *stt;
	long i, ret;
	struct kvmppc_spapr_tce_iommu_table *stit;

	stt = kvmppc_find_table(vcpu->kvm, liobn);
	if (!stt)
		return H_TOO_HARD;

	ret = kvmppc_ioba_validate(stt, ioba, npages);
	if (ret != H_SUCCESS)
		return ret;

	/* Check permission bits only, to allow userspace to poison a TCE for debug */
	if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
		return H_PARAMETER;

	list_for_each_entry_lockless(stit, &stt->iommu_tables, next) {
		unsigned long entry = ioba >> stt->page_shift;

		for (i = 0; i < npages; ++i) {
			ret = kvmppc_tce_iommu_unmap(vcpu->kvm, stt,
					stit->tbl, entry + i);

			if (ret == H_SUCCESS)
				continue;

			if (ret == H_TOO_HARD)
				goto invalidate_exit;

			WARN_ON_ONCE(1);
			kvmppc_clear_tce(vcpu->kvm->mm, stit->tbl, entry + i);
		}
	}

	for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
		kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);

invalidate_exit:
	list_for_each_entry_lockless(stit, &stt->iommu_tables, next)
		iommu_tce_kill(stit->tbl, ioba >> stt->page_shift, npages);

	return ret;
}
EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);