// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020 Intel Corporation
 */

#include <linux/slab.h>

#include "gem/i915_gem_lmem.h"

#include "i915_trace.h"
#include "intel_gtt.h"
#include "gen6_ppgtt.h"
#include "gen8_ppgtt.h"

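/*
 * alloc_pt - allocate a single page table and its backing DMA page.
 *
 * Both allocations use I915_GFP_ALLOW_FAIL, so callers must handle an
 * ERR_PTR return; note that any failure of vm->alloc_pt_dma() is
 * reported as -ENOMEM.
 */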
struct i915_page_table *alloc_pt(struct i915_address_space *vm)
{
	struct i915_page_table *pt;

	pt = kmalloc(sizeof(*pt), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pt))
		return ERR_PTR(-ENOMEM);

	pt->base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pt->base)) {
		kfree(pt);
		return ERR_PTR(-ENOMEM);
	}

	atomic_set(&pt->used, 0);
	return pt;
}

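/*
 * __alloc_pd - allocate a page directory with room for @count child
 * entries, but no backing DMA page yet. Returns NULL (not an ERR_PTR)
 * on failure; alloc_pd() below layers the DMA allocation on top.
 */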
struct i915_page_directory *__alloc_pd(int count)
{
	struct i915_page_directory *pd;

	pd = kzalloc(sizeof(*pd), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd))
		return NULL;

	pd->entry = kcalloc(count, sizeof(*pd->entry), I915_GFP_ALLOW_FAIL);
	if (unlikely(!pd->entry)) {
		kfree(pd);
		return NULL;
	}

	spin_lock_init(&pd->lock);
	return pd;
}

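/*
 * alloc_pd - allocate a fully formed page directory: the struct, an
 * I915_PDES-entry pointer array, and the 4K DMA page the HW will walk.
 */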
struct i915_page_directory *alloc_pd(struct i915_address_space *vm)
{
	struct i915_page_directory *pd;

	pd = __alloc_pd(I915_PDES);
	if (unlikely(!pd))
		return ERR_PTR(-ENOMEM);

	pd->pt.base = vm->alloc_pt_dma(vm, I915_GTT_PAGE_SIZE_4K);
	if (IS_ERR(pd->pt.base)) {
		kfree(pd->entry);
		kfree(pd);
		return ERR_PTR(-ENOMEM);
	}

	return pd;
}

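/*
 * free_px - free a page table or page directory.
 *
 * @lvl selects the type: level 0 is a plain page table, anything higher
 * is a page directory carrying an extra entry array. The BUILD_BUG_ON
 * documents the layout assumption that makes this safe: struct
 * i915_page_directory embeds its i915_page_table at offset 0, so the
 * same pointer can be freed either way.
 */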
void free_px(struct i915_address_space *vm, struct i915_page_table *pt, int lvl)
{
	BUILD_BUG_ON(offsetof(struct i915_page_directory, pt));

	if (lvl) {
		struct i915_page_directory *pd =
			container_of(pt, typeof(*pd), pt);
		kfree(pd->entry);
	}

	if (pt->base)
		i915_gem_object_put(pt->base);

	kfree(pt);
}

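/*
 * write_dma_entry - write one encoded PDE/PTE into a mapped page-table
 * page and flush the CPU cacheline, as the GPU may read the page
 * tables without snooping the CPU caches.
 */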
static void
write_dma_entry(struct drm_i915_gem_object * const pdma,
		const unsigned short idx,
		const u64 encoded_entry)
{
	u64 * const vaddr = __px_vaddr(pdma);

	vaddr[idx] = encoded_entry;
	clflush_cache_range(&vaddr[idx], sizeof(u64));
}

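/*
 * __set_pd_entry - install the child @to at @idx in @pd: take a
 * used-count reference on the directory, hook up the CPU-side pointer,
 * and write the HW-visible entry encoded from the child's DMA address.
 */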
void
__set_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       struct i915_page_table * const to,
	       u64 (*encode)(const dma_addr_t, const enum i915_cache_level))
{
	/* Each thread pre-pins the pd, and we may have a thread per pde. */
	GEM_BUG_ON(atomic_read(px_used(pd)) > NALLOC * I915_PDES);

	atomic_inc(px_used(pd));
	pd->entry[idx] = to;
	write_dma_entry(px_base(pd), idx, encode(px_dma(to), I915_CACHE_LLC));
}

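/*
 * clear_pd_entry - the inverse of __set_pd_entry(): point the HW entry
 * back at the scratch page, drop the CPU-side pointer and the
 * directory's used-count.
 */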
void
clear_pd_entry(struct i915_page_directory * const pd,
	       const unsigned short idx,
	       const struct drm_i915_gem_object * const scratch)
{
	GEM_BUG_ON(atomic_read(px_used(pd)) == 0);

	write_dma_entry(px_base(pd), idx, scratch->encode);
	pd->entry[idx] = NULL;
	atomic_dec(px_used(pd));
}

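/*
 * release_pd_entry - drop a reference on @pt and, if it was the last
 * one, unhook it from @pd. The atomic_add_unless() fast path avoids
 * taking the spinlock while other references remain; only the final
 * decrement is performed under pd->lock, so clearing the entry and
 * dropping the count appear atomic to concurrent lookups. Returns true
 * if the caller should now free @pt.
 */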
bool
release_pd_entry(struct i915_page_directory * const pd,
		 const unsigned short idx,
		 struct i915_page_table * const pt,
		 const struct drm_i915_gem_object * const scratch)
{
	bool free = false;

	if (atomic_add_unless(&pt->used, -1, 1))
		return false;

	spin_lock(&pd->lock);
	if (atomic_dec_and_test(&pt->used)) {
		clear_pd_entry(pd, idx, scratch);
		free = true;
	}
	spin_unlock(&pd->lock);

	return free;
}

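/*
 * i915_ppgtt_init_hw - one-time HW enabling for ppGTT. Only gen6/gen7
 * need explicit enabling here; on gen8+ the ppGTT is enabled per
 * context, so this reduces to applying the GTT workarounds.
 */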
int i915_ppgtt_init_hw(struct intel_gt *gt)
{
	struct drm_i915_private *i915 = gt->i915;

	gtt_write_workarounds(gt);

	if (GRAPHICS_VER(i915) == 6)
		gen6_ppgtt_enable(gt);
	else if (GRAPHICS_VER(i915) == 7)
		gen7_ppgtt_enable(gt);

	return 0;
}

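/*
 * Version dispatch: pre-gen8 platforms use the gen6 ppGTT
 * implementation, gen8+ the gen8 one, which also understands LMEM
 * page-table placement via @lmem_pt_obj_flags.
 */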
static struct i915_ppgtt *
__ppgtt_create(struct intel_gt *gt, unsigned long lmem_pt_obj_flags)
{
	if (GRAPHICS_VER(gt->i915) < 8)
		return gen6_ppgtt_create(gt);
	else
		return gen8_ppgtt_create(gt, lmem_pt_obj_flags);
}

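/*
 * i915_ppgtt_create - create and trace a new ppGTT; returns an ERR_PTR
 * on failure. An illustrative sketch of a caller (not lifted from a
 * specific one):
 *
 *	ppgtt = i915_ppgtt_create(gt, 0);
 *	if (IS_ERR(ppgtt))
 *		return PTR_ERR(ppgtt);
 *	...
 *	i915_vm_put(&ppgtt->vm);	// drop our reference when done
 */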
struct i915_ppgtt *i915_ppgtt_create(struct intel_gt *gt,
				     unsigned long lmem_pt_obj_flags)
{
	struct i915_ppgtt *ppgtt;

	ppgtt = __ppgtt_create(gt, lmem_pt_obj_flags);
	if (IS_ERR(ppgtt))
		return ppgtt;

	trace_i915_ppgtt_create(&ppgtt->vm);

	return ppgtt;
}

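/*
 * ppgtt_bind_vma - bind a vma into the ppGTT. Page-table memory is
 * drawn from the pre-allocated @stash, so this step itself cannot fail;
 * decoupling allocation from binding lets the bind run in contexts
 * where sleeping allocations are not welcome. The I915_VMA_ALLOC_BIT
 * guard makes a rebind skip the va-range allocation.
 */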
void ppgtt_bind_vma(struct i915_address_space *vm,
		    struct i915_vm_pt_stash *stash,
		    struct i915_vma *vma,
		    enum i915_cache_level cache_level,
		    u32 flags)
{
	u32 pte_flags;

	if (!test_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma))) {
		vm->allocate_va_range(vm, stash, vma->node.start, vma->size);
		set_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma));
	}

	/* Applicable to VLV, and gen8+ */
	pte_flags = 0;
	if (i915_gem_object_is_readonly(vma->obj))
		pte_flags |= PTE_READ_ONLY;
	if (i915_gem_object_is_lmem(vma->obj))
		pte_flags |= PTE_LM;

	vm->insert_entries(vm, vma, cache_level, pte_flags);
	wmb();
}

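/* Only clear the range if ppgtt_bind_vma() actually allocated it. */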
void ppgtt_unbind_vma(struct i915_address_space *vm, struct i915_vma *vma)
{
	if (test_and_clear_bit(I915_VMA_ALLOC_BIT, __i915_vma_flags(vma)))
		vm->clear_range(vm, vma->node.start, vma->size);
}

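/*
 * pd_count - worst-case number of page-table pages needed to map @size
 * bytes with entries of granularity @shift. The 2 * (BIT_ULL(shift) - 1)
 * slack covers an arbitrarily misaligned start offset, since the final
 * placement of the range is not yet known here.
 */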
static unsigned long pd_count(u64 size, int shift)
{
	/* Beware later misalignment */
	return (size + 2 * (BIT_ULL(shift) - 1)) >> shift;
}

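/*
 * i915_vm_alloc_pt_stash - pre-allocate (but do not yet map) every page
 * table and directory that could be needed to map @size bytes. Leaf
 * page tables are chained onto stash->pt[0], intermediate directories
 * onto stash->pt[1], linked through pt->stash. The expected ordering,
 * as a sketch with error handling elided (real callers thread this
 * through the vma bind path):
 *
 *	struct i915_vm_pt_stash stash = {};
 *
 *	i915_vm_alloc_pt_stash(vm, &stash, size);	// may sleep, may fail
 *	i915_vm_map_pt_stash(vm, &stash);		// map the backing pages
 *	vm->allocate_va_range(vm, &stash, start, size);	// consumes the stash
 *	i915_vm_free_pt_stash(vm, &stash);		// frees any leftovers
 */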
int i915_vm_alloc_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash,
			   u64 size)
{
	unsigned long count;
	int shift, n;

	shift = vm->pd_shift;
	if (!shift)
		return 0;

	count = pd_count(size, shift);
	while (count--) {
		struct i915_page_table *pt;

		pt = alloc_pt(vm);
		if (IS_ERR(pt)) {
			i915_vm_free_pt_stash(vm, stash);
			return PTR_ERR(pt);
		}

		pt->stash = stash->pt[0];
		stash->pt[0] = pt;
	}

	for (n = 1; n < vm->top; n++) {
		shift += ilog2(I915_PDES); /* Each PD holds 512 entries */
		count = pd_count(size, shift);
		while (count--) {
			struct i915_page_directory *pd;

			pd = alloc_pd(vm);
			if (IS_ERR(pd)) {
				i915_vm_free_pt_stash(vm, stash);
				return PTR_ERR(pd);
			}

			pd->pt.stash = stash->pt[1];
			stash->pt[1] = &pd->pt;
		}
	}

	return 0;
}

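/*
 * i915_vm_map_pt_stash - map the backing pages of everything in the
 * stash for CPU access. This uses the _locked variant, so the caller is
 * expected to already hold the relevant object lock. On error the stash
 * is left intact for i915_vm_free_pt_stash() to clean up.
 */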
int i915_vm_map_pt_stash(struct i915_address_space *vm,
			 struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n, err;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		for (pt = stash->pt[n]; pt; pt = pt->stash) {
			err = map_pt_dma_locked(vm, pt->base);
			if (err)
				return err;
		}
	}

	return 0;
}

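/*
 * i915_vm_free_pt_stash - free whatever allocate_va_range() did not
 * consume. The stash index doubles as the level passed to free_px():
 * 0 for page tables, 1 for page directories.
 */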
void i915_vm_free_pt_stash(struct i915_address_space *vm,
			   struct i915_vm_pt_stash *stash)
{
	struct i915_page_table *pt;
	int n;

	for (n = 0; n < ARRAY_SIZE(stash->pt); n++) {
		while ((pt = stash->pt[n])) {
			stash->pt[n] = pt->stash;
			free_px(vm, pt, n);
		}
	}
}

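/*
 * ppgtt_set_pages - a ppGTT vma simply borrows the backing store of its
 * object; no extra pages need allocating, hence this cannot fail.
 */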
int ppgtt_set_pages(struct i915_vma *vma)
{
	GEM_BUG_ON(vma->pages);

	vma->pages = vma->obj->mm.pages;
	vma->page_sizes = vma->obj->mm.page_sizes;

	return 0;
}

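/*
 * ppgtt_init - fill in the parts of a ppGTT common to all generations:
 * the address-space size (from the platform's ppgtt_size, in bits), the
 * reservation object, and the vma bind/unbind ops defined above.
 */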
void ppgtt_init(struct i915_ppgtt *ppgtt, struct intel_gt *gt,
		unsigned long lmem_pt_obj_flags)
{
	struct drm_i915_private *i915 = gt->i915;

	ppgtt->vm.gt = gt;
	ppgtt->vm.i915 = i915;
	ppgtt->vm.dma = i915->drm.dev;
	ppgtt->vm.total = BIT_ULL(INTEL_INFO(i915)->ppgtt_size);
	ppgtt->vm.lmem_pt_obj_flags = lmem_pt_obj_flags;

	dma_resv_init(&ppgtt->vm._resv);
	i915_address_space_init(&ppgtt->vm, VM_CLASS_PPGTT);

	ppgtt->vm.vma_ops.bind_vma    = ppgtt_bind_vma;
	ppgtt->vm.vma_ops.unbind_vma  = ppgtt_unbind_vma;
	ppgtt->vm.vma_ops.set_pages   = ppgtt_set_pages;
	ppgtt->vm.vma_ops.clear_pages = clear_pages;
}