/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2008-2015 Intel Corporation
 */

#include <linux/oom.h>
#include <linux/sched/mm.h>
#include <linux/shmem_fs.h>
#include <linux/slab.h>
#include <linux/swap.h>
#include <linux/pci.h>
#include <linux/dma-buf.h>
#include <linux/vmalloc.h>

#include "gt/intel_gt_requests.h"

#include "dma_resv_utils.h"
#include "i915_trace.h"

static bool swap_available(void)
{
	return get_nr_swap_pages() > 0;
}

static bool can_release_pages(struct drm_i915_gem_object *obj)
{
	/* Consider only shrinkable objects. */
	if (!i915_gem_object_is_shrinkable(obj))
		return false;

	/*
	 * We can only return physical pages to the system if we can either
	 * discard the contents (because the user has marked them as being
	 * purgeable) or if we can move their contents out to swap.
	 */
	return swap_available() || obj->mm.madv == I915_MADV_DONTNEED;
}

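/*
 * Translate the shrinker control flags into unbind flags and try to drop
 * all of the object's vma bindings so that its backing pages become
 * eligible for release. Returns true if the object was fully unbound.
 */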
static bool unsafe_drop_pages(struct drm_i915_gem_object *obj,
			      unsigned long shrink, bool trylock_vm)
{
	unsigned long flags;

	flags = 0;
	if (shrink & I915_SHRINK_ACTIVE)
		flags |= I915_GEM_OBJECT_UNBIND_ACTIVE;
	if (!(shrink & I915_SHRINK_BOUND))
		flags |= I915_GEM_OBJECT_UNBIND_TEST;
	if (trylock_vm)
		flags |= I915_GEM_OBJECT_UNBIND_VM_TRYLOCK;

	if (i915_gem_object_unbind(obj, flags) == 0)
		return true;

	return false;
}

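/*
 * After dropping the pages, either discard their contents outright (the user
 * marked the object purgeable) or, if requested, start writing dirty contents
 * back to swap. Objects that have already been purged need no further work.
 */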
static void try_to_writeback(struct drm_i915_gem_object *obj,
			     unsigned int flags)
{
	switch (obj->mm.madv) {
	case I915_MADV_DONTNEED:
		i915_gem_object_truncate(obj);
		return;
	case __I915_MADV_PURGED:
		return;
	}

	if (flags & I915_SHRINK_WRITEBACK)
		i915_gem_object_writeback(obj);
}

/**
 * i915_gem_shrink - Shrink buffer object caches
 * @ww: i915 gem ww acquire ctx, or NULL
 * @i915: i915 device
 * @target: amount of memory to make available, in pages
 * @nr_scanned: optional output for number of pages scanned (incremental)
 * @shrink: control flags for selecting cache types
 *
 * This function is the main interface to the shrinker. It will try to release
 * up to @target pages of main memory backing storage from buffer objects.
 * Selection of the specific caches can be done with @shrink. This is e.g. useful
 * when purgeable objects should be removed from caches preferentially.
 *
 * Note that it's not guaranteed that the released amount is actually available
 * as free system memory - the pages might still be in use due to other reasons
 * (like cpu mmaps) or the mm core may have reused them before we could grab
 * them. Therefore code that needs to explicitly shrink buffer object caches
 * (e.g. to avoid deadlocks in memory reclaim) must fall back to
 * i915_gem_shrink_all().
 *
 * Also note that any kind of pinning (both per-vma address space pins and
 * backing storage pins at the buffer object level) results in the shrinker code
 * having to skip the object.
 *
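 * A typical caller passes a NULL ww context and a mask of cache types, for
 * example (see i915_gem_shrink_all() below for the unconditional variant):
 *
 *	i915_gem_shrink(NULL, i915, target, NULL,
 *			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND);
 *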
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long
i915_gem_shrink(struct i915_gem_ww_ctx *ww,
		struct drm_i915_private *i915,
		unsigned long target,
		unsigned long *nr_scanned,
		unsigned int shrink)
{
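	/*
	 * Walk the purge_list first: those objects are purgeable and their
	 * contents can simply be discarded. Only then consider the rest of
	 * the shrinkable objects, and only if the caller asked for
	 * I915_SHRINK_BOUND and/or I915_SHRINK_UNBOUND.
	 */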
	const struct {
		struct list_head *list;
		unsigned int bit;
	} phases[] = {
		{ &i915->mm.purge_list, ~0u },
		{
			&i915->mm.shrink_list,
			I915_SHRINK_BOUND | I915_SHRINK_UNBOUND
		},
		{ NULL, 0 },
	}, *phase;
	intel_wakeref_t wakeref = 0;
	unsigned long count = 0;
	unsigned long scanned = 0;
	int err = 0;

	/* CHV + VTD workaround uses stop_machine(); need to trylock vm->mutex */
	bool trylock_vm = !ww && intel_vm_no_concurrent_access_wa(i915);

	trace_i915_gem_shrink(i915, target, shrink);

	/*
	 * Unbinding of objects will require HW access; let us not wake the
	 * device just to recover a little memory. If absolutely necessary,
	 * we will force the wake during oom-notifier.
	 */
	if (shrink & I915_SHRINK_BOUND) {
		wakeref = intel_runtime_pm_get_if_in_use(&i915->runtime_pm);
		if (!wakeref)
			shrink &= ~I915_SHRINK_BOUND;
	}

	/*
	 * When shrinking the active list, we should also consider active
	 * contexts. Active contexts are pinned until they are retired, and
	 * so can not be simply unbound to retire and unpin their pages. To
	 * shrink the contexts, we must wait until the gpu is idle and
	 * has completed its switch to the kernel context. In short, we do
	 * not have a good mechanism for idling a specific context, but
	 * what we can do is give them a kick so that we do not keep idle
	 * contexts around longer than is necessary.
	 */
	if (shrink & I915_SHRINK_ACTIVE)
		/* Retire requests to unpin all idle contexts */
		intel_gt_retire_requests(&i915->gt);

	/*
	 * As we may completely rewrite the (un)bound list whilst unbinding
	 * (due to retiring requests) we have to strictly process only
	 * one element of the list at a time, and recheck the list
	 * on every iteration.
	 *
	 * In particular, we must hold a reference whilst removing the
	 * object as we may end up waiting for and/or retiring the objects.
	 * This might release the final reference (held by the active list)
	 * and result in the object being freed from under us. This is
	 * similar to the precautions the eviction code must take whilst
	 * removing objects.
	 *
	 * Also note that although these lists do not hold a reference to
	 * the object we can safely grab one here: The final object
	 * unreferencing and the bound_list are both protected by the
	 * dev->struct_mutex and so we won't ever be able to observe an
	 * object on the bound_list with a reference count equal to 0.
	 */
	for (phase = phases; phase->list; phase++) {
		struct list_head still_in_list;
		struct drm_i915_gem_object *obj;
		unsigned long flags;

		if ((shrink & phase->bit) == 0)
			continue;

		INIT_LIST_HEAD(&still_in_list);

		/*
		 * We serialize our access to unreferenced objects through
		 * the use of the struct_mutex. While the objects are not
		 * yet freed (due to RCU then a workqueue) we still want
		 * to be able to shrink their pages, so they remain on
		 * the unbound/bound list until actually freed.
		 */
		spin_lock_irqsave(&i915->mm.obj_lock, flags);
		while (count < target &&
		       (obj = list_first_entry_or_null(phase->list,
						       typeof(*obj),
						       mm.link))) {
			list_move_tail(&obj->mm.link, &still_in_list);

			if (shrink & I915_SHRINK_VMAPS &&
			    !is_vmalloc_addr(obj->mm.mapping))
				continue;

			if (!(shrink & I915_SHRINK_ACTIVE) &&
			    i915_gem_object_is_framebuffer(obj))
				continue;

			if (!can_release_pages(obj))
				continue;

			if (!kref_get_unless_zero(&obj->base.refcount))
				continue;

			spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

			err = 0;
			if (unsafe_drop_pages(obj, shrink, trylock_vm)) {
				/* May arrive from get_pages on another bo */
				if (!ww) {
					if (!i915_gem_object_trylock(obj))
						goto skip;
				} else {
					err = i915_gem_object_lock(obj, ww);
					if (err)
						goto skip;
				}

				if (!__i915_gem_object_put_pages(obj)) {
					try_to_writeback(obj, shrink);
					count += obj->base.size >> PAGE_SHIFT;
				}
				if (!ww)
					i915_gem_object_unlock(obj);
			}

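			/* If all fences on the object have signalled, prune them too */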
			dma_resv_prune(obj->base.resv);

			scanned += obj->base.size >> PAGE_SHIFT;
skip:
			i915_gem_object_put(obj);

			spin_lock_irqsave(&i915->mm.obj_lock, flags);
			if (err)
				break;
		}
		list_splice_tail(&still_in_list, phase->list);
		spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
		if (err)
			break;
	}

	if (shrink & I915_SHRINK_BOUND)
		intel_runtime_pm_put(&i915->runtime_pm, wakeref);

	if (err)
		return err;

	if (nr_scanned)
		*nr_scanned += scanned;
	return count;
}

/**
 * i915_gem_shrink_all - Shrink buffer object caches completely
 * @i915: i915 device
 *
 * This is a simple wrapper around i915_gem_shrink() to aggressively shrink all
 * caches completely. It also first waits for and retires all outstanding
 * requests to also be able to release backing storage for active objects.
 *
 * This should only be used in code to intentionally quiesce the gpu or as a
 * last-ditch effort when memory seems to have run out.
 *
 * Returns:
 * The number of pages of backing storage actually released.
 */
unsigned long i915_gem_shrink_all(struct drm_i915_private *i915)
{
	intel_wakeref_t wakeref;
	unsigned long freed = 0;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
		freed = i915_gem_shrink(NULL, i915, -1UL, NULL,
					I915_SHRINK_BOUND |
					I915_SHRINK_UNBOUND);
	}

	return freed;
}

static unsigned long
i915_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long num_objects;
	unsigned long count;

	count = READ_ONCE(i915->mm.shrink_memory) >> PAGE_SHIFT;
	num_objects = READ_ONCE(i915->mm.shrink_count);

	/*
	 * Update our preferred vmscan batch size for the next pass.
	 * Our rough guess for an effective batch size is roughly 2
	 * available GEM objects' worth of pages. That is, we don't want
	 * the shrinker to fire until it is worth the cost of freeing an
	 * entire GEM object.
	 */
	if (num_objects) {
		unsigned long avg = 2 * count / num_objects;

		i915->mm.shrinker.batch =
			max((i915->mm.shrinker.batch + avg) >> 1,
			    128ul /* default SHRINK_BATCH */);
	}

	return count;
}

static unsigned long
i915_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
	struct drm_i915_private *i915 =
		container_of(shrinker, struct drm_i915_private, mm.shrinker);
	unsigned long freed;

	sc->nr_scanned = 0;

	freed = i915_gem_shrink(NULL, i915,
				sc->nr_to_scan,
				&sc->nr_scanned,
				I915_SHRINK_BOUND |
				I915_SHRINK_UNBOUND);
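	/*
	 * If that did not satisfy the request and we are running on behalf
	 * of kswapd, take a runtime-pm wakeref and retry, this time also
	 * unbinding active objects and writing dirty pages back to swap.
	 */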
	if (sc->nr_scanned < sc->nr_to_scan && current_is_kswapd()) {
		intel_wakeref_t wakeref;

		with_intel_runtime_pm(&i915->runtime_pm, wakeref) {
			freed += i915_gem_shrink(NULL, i915,
						 sc->nr_to_scan - sc->nr_scanned,
						 &sc->nr_scanned,
						 I915_SHRINK_ACTIVE |
						 I915_SHRINK_BOUND |
						 I915_SHRINK_UNBOUND |
						 I915_SHRINK_WRITEBACK);
		}
	}

	return sc->nr_scanned ? freed : SHRINK_STOP;
}

static int
i915_gem_shrinker_oom(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.oom_notifier);
	struct drm_i915_gem_object *obj;
	unsigned long unevictable, available, freed_pages;
	intel_wakeref_t wakeref;
	unsigned long flags;

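	/*
	 * We are the oom notifier, so force the device awake and evict
	 * everything we can, writing dirty pages back to swap as we go.
	 */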
	freed_pages = 0;
	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_WRITEBACK);

	/* Because we may be allocating inside our own driver, we cannot
	 * assert that there are no objects with pinned pages that are not
	 * being pointed to by hardware.
	 */
	available = unevictable = 0;
	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	list_for_each_entry(obj, &i915->mm.shrink_list, mm.link) {
		if (!can_release_pages(obj))
			unevictable += obj->base.size >> PAGE_SHIFT;
		else
			available += obj->base.size >> PAGE_SHIFT;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);

	if (freed_pages || available)
		pr_info("Purging GPU memory, %lu pages freed, "
			"%lu pages still pinned, %lu pages left available.\n",
			freed_pages, unevictable, available);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

static int
i915_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
	struct drm_i915_private *i915 =
		container_of(nb, struct drm_i915_private, mm.vmap_notifier);
	struct i915_vma *vma, *next;
	unsigned long freed_pages = 0;
	intel_wakeref_t wakeref;

	with_intel_runtime_pm(&i915->runtime_pm, wakeref)
		freed_pages += i915_gem_shrink(NULL, i915, -1UL, NULL,
					       I915_SHRINK_BOUND |
					       I915_SHRINK_UNBOUND |
					       I915_SHRINK_VMAPS);

	/* We also want to clear any cached iomaps as they wrap vmap */
	mutex_lock(&i915->ggtt.vm.mutex);
	list_for_each_entry_safe(vma, next,
				 &i915->ggtt.vm.bound_list, vm_link) {
		unsigned long count = vma->node.size >> PAGE_SHIFT;

		if (!vma->iomap || i915_vma_is_active(vma))
			continue;

		if (__i915_vma_unbind(vma) == 0)
			freed_pages += count;
	}
	mutex_unlock(&i915->ggtt.vm.mutex);

	*(unsigned long *)ptr += freed_pages;
	return NOTIFY_DONE;
}

void i915_gem_driver_register__shrinker(struct drm_i915_private *i915)
{
	i915->mm.shrinker.scan_objects = i915_gem_shrinker_scan;
	i915->mm.shrinker.count_objects = i915_gem_shrinker_count;
	i915->mm.shrinker.seeks = DEFAULT_SEEKS;
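	/* Start with a large batch; i915_gem_shrinker_count() retunes it. */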
	i915->mm.shrinker.batch = 4096;
	drm_WARN_ON(&i915->drm, register_shrinker(&i915->mm.shrinker));

	i915->mm.oom_notifier.notifier_call = i915_gem_shrinker_oom;
	drm_WARN_ON(&i915->drm, register_oom_notifier(&i915->mm.oom_notifier));

	i915->mm.vmap_notifier.notifier_call = i915_gem_shrinker_vmap;
	drm_WARN_ON(&i915->drm,
		    register_vmap_purge_notifier(&i915->mm.vmap_notifier));
}

void i915_gem_driver_unregister__shrinker(struct drm_i915_private *i915)
{
	drm_WARN_ON(&i915->drm,
		    unregister_vmap_purge_notifier(&i915->mm.vmap_notifier));
	drm_WARN_ON(&i915->drm,
		    unregister_oom_notifier(&i915->mm.oom_notifier));
	unregister_shrinker(&i915->mm.shrinker);
}

void i915_gem_shrinker_taints_mutex(struct drm_i915_private *i915,
				    struct mutex *mutex)
{
	if (!IS_ENABLED(CONFIG_LOCKDEP))
		return;

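	/*
	 * Pretend to take @mutex inside the fs_reclaim context so that
	 * lockdep records the dependency and will complain if the mutex is
	 * later held while allocating memory that may enter reclaim.
	 */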
	fs_reclaim_acquire(GFP_KERNEL);

	mutex_acquire(&mutex->dep_map, 0, 0, _RET_IP_);
	mutex_release(&mutex->dep_map, _RET_IP_);

	fs_reclaim_release(GFP_KERNEL);
}

#define obj_to_i915(obj__) to_i915((obj__)->base.dev)

void i915_gem_object_make_unshrinkable(struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	/*
	 * We can only be called while the pages are pinned or when
	 * the pages are released. If pinned, we should only be called
	 * from a single caller under controlled conditions; and on release
	 * only one caller may release us. The two must never cross.
	 */
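	/* Fast path: already unshrinkable, just take another pin. */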
	if (atomic_add_unless(&obj->mm.shrink_pin, 1, 0))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	if (!atomic_fetch_inc(&obj->mm.shrink_pin) &&
	    !list_empty(&obj->mm.link)) {
		list_del_init(&obj->mm.link);
		i915->mm.shrink_count--;
		i915->mm.shrink_memory -= obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

static void __i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj,
					      struct list_head *head)
{
	struct drm_i915_private *i915 = obj_to_i915(obj);
	unsigned long flags;

	GEM_BUG_ON(!i915_gem_object_has_pages(obj));
	if (!i915_gem_object_is_shrinkable(obj))
		return;

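	/* If other pins remain after dropping ours, the object stays off the lists. */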
	if (atomic_add_unless(&obj->mm.shrink_pin, -1, 1))
		return;

	spin_lock_irqsave(&i915->mm.obj_lock, flags);
	GEM_BUG_ON(!kref_read(&obj->base.refcount));
	if (atomic_dec_and_test(&obj->mm.shrink_pin)) {
		GEM_BUG_ON(!list_empty(&obj->mm.link));

		list_add_tail(&obj->mm.link, head);
		i915->mm.shrink_count++;
		i915->mm.shrink_memory += obj->base.size;
	}
	spin_unlock_irqrestore(&i915->mm.obj_lock, flags);
}

void i915_gem_object_make_shrinkable(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_make_shrinkable(obj,
					  &obj_to_i915(obj)->mm.shrink_list);
}

void i915_gem_object_make_purgeable(struct drm_i915_gem_object *obj)
{
	__i915_gem_object_make_shrinkable(obj,
					  &obj_to_i915(obj)->mm.purge_list);
}