/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2014-2016 Intel Corporation
 */

#include <linux/highmem.h>
#include <linux/shmem_fs.h>
#include <linux/swap.h>

#include <drm/drm_cache.h>

#include "gt/intel_gt.h"
#include "i915_drv.h"
#include "i915_gem_object.h"
#include "i915_gem_region.h"
#include "i915_scatterlist.h"

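/*
 * Replace the object's shmem backing store with a single physically
 * contiguous, coherent DMA allocation, copying over the current
 * contents page by page.
 */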
static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
{
	struct address_space *mapping = obj->base.filp->f_mapping;
	struct scatterlist *sg;
	struct sg_table *st;
	dma_addr_t dma;
	void *vaddr;
	void *dst;
	int i;

	if (GEM_WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
		return -EINVAL;

	/*
	 * Always aligning to the object size allows a single allocation
	 * to handle all possible callers, and given typical object sizes,
	 * the alignment of the buddy allocation will naturally match.
	 */
	vaddr = dma_alloc_coherent(obj->base.dev->dev,
				   roundup_pow_of_two(obj->base.size),
				   &dma, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		goto err_pci;

	if (sg_alloc_table(st, 1, GFP_KERNEL))
		goto err_st;

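	/*
	 * The contiguous allocation is described by a single sg entry;
	 * note that the kernel vaddr is stashed where a struct page
	 * pointer would normally live, and recovered again in
	 * i915_gem_object_put_pages_phys().
	 */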
	sg = st->sgl;
	sg->offset = 0;
	sg->length = obj->base.size;

	sg_assign_page(sg, (struct page *)vaddr);
	sg_dma_address(sg) = dma;
	sg_dma_len(sg) = obj->base.size;

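	/*
	 * Snapshot the current shmem contents into the contiguous buffer,
	 * flushing each page out of the CPU cache so the GPU reads the
	 * copy rather than stale cachelines.
	 */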
	dst = vaddr;
	for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
		struct page *page;
		void *src;

		page = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(page))
			goto err_pages;

		src = kmap_atomic(page);
		memcpy(dst, src, PAGE_SIZE);
		drm_clflush_virt_range(dst, PAGE_SIZE);
		kunmap_atomic(src);

		put_page(page);
		dst += PAGE_SIZE;
	}

	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	/* We're no longer struct page backed */
	obj->mem_flags &= ~I915_BO_FLAG_STRUCT_PAGE;
	__i915_gem_object_set_pages(obj, st, sg->length);

	return 0;

err_pages:
	/* Once sg_alloc_table() has succeeded, its entries must be freed too */
	sg_free_table(st);
err_st:
	kfree(st);
err_pci:
	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
	return -ENOMEM;
}
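
/*
 * Release the contiguous DMA allocation, writing any dirty contents
 * back to the shmem filp first so nothing is lost across the
 * transition.
 */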
void
i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
			       struct sg_table *pages)
{
	dma_addr_t dma = sg_dma_address(pages->sgl);
	/* Recover the kernel vaddr stashed in place of the page pointer */
	void *vaddr = sg_page(pages->sgl);

	__i915_gem_object_release_shmem(obj, pages, false);

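	/*
	 * Copy the contents back into freshly faulted shmem pages,
	 * flushing the CPU cache first so we read what was last written
	 * through the phys mapping rather than any stale cachelines.
	 */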
	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		void *src = vaddr;
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page;
			char *dst;

			page = shmem_read_mapping_page(mapping, i);
			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(src, PAGE_SIZE);
			memcpy(dst, src, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			if (obj->mm.madv == I915_MADV_WILLNEED)
				mark_page_accessed(page);
			put_page(page);

			src += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

	sg_free_table(pages);
	kfree(pages);

	dma_free_coherent(obj->base.dev->dev,
			  roundup_pow_of_two(obj->base.size),
			  vaddr, dma);
}
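
/*
 * pwrite directly into the contiguous phys backing store, bypassing
 * the shmem filp entirely; the object is treated as if it remains
 * coherent in the GTT domain, mirroring shmem_pwrite.
 */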
int i915_gem_object_pwrite_phys(struct drm_i915_gem_object *obj,
				const struct drm_i915_gem_pwrite *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE |
				   I915_WAIT_ALL,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

	/*
	 * We manually control the domain here and pretend that it
	 * remains coherent i.e. in the GTT domain, like shmem_pwrite.
	 */
	i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);

	if (copy_from_user(vaddr, user_data, args->size))
		return -EFAULT;

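	/*
	 * Push the CPU writes out of the cache and through the chipset
	 * write buffers so they are visible to the GPU.
	 */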
	drm_clflush_virt_range(vaddr, args->size);
	intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);

	i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
	return 0;
}
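
/* pread directly from the contiguous phys backing store. */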
int i915_gem_object_pread_phys(struct drm_i915_gem_object *obj,
			       const struct drm_i915_gem_pread *args)
{
	void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
	char __user *user_data = u64_to_user_ptr(args->data_ptr);
	int err;

	err = i915_gem_object_wait(obj,
				   I915_WAIT_INTERRUPTIBLE,
				   MAX_SCHEDULE_TIMEOUT);
	if (err)
		return err;

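	/* Invalidate any cached copies before reading what the GPU wrote */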
	drm_clflush_virt_range(vaddr, args->size);
	if (copy_to_user(user_data, vaddr, args->size))
		return -EFAULT;

	return 0;
}
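
/*
 * Swap the object's shmem backing for the contiguous phys allocation,
 * restoring the original pages if the transition fails.
 */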
static int i915_gem_object_shmem_to_phys(struct drm_i915_gem_object *obj)
{
	struct sg_table *pages;
	int err;

	pages = __i915_gem_object_unset_pages(obj);

	err = i915_gem_object_get_pages_phys(obj);
	if (err)
		goto err_xfer;

	/* Perma-pin (until release) the physical set of pages */
	__i915_gem_object_pin_pages(obj);

	if (!IS_ERR_OR_NULL(pages))
		i915_gem_object_put_pages_shmem(obj, pages);

	i915_gem_object_release_memory_region(obj);
	return 0;

err_xfer:
	if (!IS_ERR_OR_NULL(pages)) {
		unsigned int sg_page_sizes = i915_sg_dma_sizes(pages->sgl);

		__i915_gem_object_set_pages(obj, pages, sg_page_sizes);
	}
	return err;
}
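
/**
 * i915_gem_object_attach_phys - back a shmem object with contiguous phys pages
 * @obj: the object to transition; must be shmem backed
 * @align: required alignment of the backing store, in bytes
 *
 * The caller must hold the object lock. Objects that are purgeable,
 * carry a tiling quirk, or still have their pages mapped or pinned
 * cannot be transitioned.
 *
 * Returns: 0 on success, negative error code on failure.
 */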
int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
{
	int err;

	assert_object_held(obj);

	if (align > obj->base.size)
		return -EINVAL;

	if (!i915_gem_object_is_shmem(obj))
		return -EINVAL;

	if (!i915_gem_object_has_struct_page(obj))
		return 0;

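	/* Drop all GPU bindings, including active ones, before the swap */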
	err = i915_gem_object_unbind(obj, I915_GEM_OBJECT_UNBIND_ACTIVE);
	if (err)
		return err;

	if (obj->mm.madv != I915_MADV_WILLNEED) {
		drm_dbg(obj->base.dev,
			"Attempting to obtain a purgeable object\n");
		return -EFAULT;
	}

	if (i915_gem_object_has_tiling_quirk(obj))
		return -EFAULT;

	if (obj->mm.mapping || i915_gem_object_has_pinned_pages(obj))
		return -EBUSY;

	return i915_gem_object_shmem_to_phys(obj);
}
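
/*
 * Illustrative usage (no caller lives in this file): display code uses
 * this to pin cursor framebuffers on platforms that require physically
 * contiguous cursors, roughly:
 *
 *	i915_gem_object_lock(obj, NULL);
 *	err = i915_gem_object_attach_phys(obj, alignment);
 *	i915_gem_object_unlock(obj);
 */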

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_gem_phys.c"
#endif