// SPDX-License-Identifier: MIT
/*
 * Copyright (C) 2012-2014 Canonical Ltd (Maarten Lankhorst)
 *
 * Based on bo.c which bears the following copyright notice,
 * but is dual licensed:
 *
 * Copyright (c) 2006-2009 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/
/*
 * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
 */

#include <linux/dma-resv.h>
#include <linux/export.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/mmu_notifier.h>

/**
 * DOC: Reservation Object Overview
 *
 * The reservation object provides a mechanism to manage shared and
 * exclusive fences associated with a buffer. A reservation object
 * can have one exclusive fence attached (normally associated with
 * write operations) or N shared fences (read operations). The RCU
 * mechanism is used to protect read access to fences from locked
 * write-side updates.
 *
 * See struct dma_resv for more details.
 */

DEFINE_WD_CLASS(reservation_ww_class);
EXPORT_SYMBOL(reservation_ww_class);
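
/*
 * Minimal usage sketch (illustrative only, not referenced by this file):
 * a driver publishes a read operation as a shared fence. The function and
 * parameter names are hypothetical; the surrounding driver context is
 * assumed.
 */
static inline int example_attach_read_fence(struct dma_resv *obj,
					    struct dma_fence *read_fence)
{
	int ret;

	ret = dma_resv_lock(obj, NULL);
	if (ret)
		return ret;

	/* a slot must be reserved before a shared fence can be added */
	ret = dma_resv_reserve_shared(obj, 1);
	if (!ret)
		dma_resv_add_shared_fence(obj, read_fence);

	dma_resv_unlock(obj);
	return ret;
}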

/**
 * dma_resv_list_alloc - allocate fence list
 * @shared_max: number of fences we need space for
 *
 * Allocate a new dma_resv_list and make sure to correctly initialize
 * shared_max.
 */
static struct dma_resv_list *dma_resv_list_alloc(unsigned int shared_max)
{
	struct dma_resv_list *list;

	list = kmalloc(struct_size(list, shared, shared_max), GFP_KERNEL);
	if (!list)
		return NULL;

	list->shared_max = (ksize(list) - offsetof(typeof(*list), shared)) /
		sizeof(*list->shared);

	return list;
}

/**
 * dma_resv_list_free - free fence list
 * @list: list to free
 *
 * Free a dma_resv_list and make sure to drop all references.
 */
static void dma_resv_list_free(struct dma_resv_list *list)
{
	unsigned int i;

	if (!list)
		return;

	for (i = 0; i < list->shared_count; ++i)
		dma_fence_put(rcu_dereference_protected(list->shared[i], true));

	kfree_rcu(list, rcu);
}

/**
 * dma_resv_init - initialize a reservation object
 * @obj: the reservation object
 */
void dma_resv_init(struct dma_resv *obj)
{
	ww_mutex_init(&obj->lock, &reservation_ww_class);
	seqcount_ww_mutex_init(&obj->seq, &obj->lock);

	RCU_INIT_POINTER(obj->fence, NULL);
	RCU_INIT_POINTER(obj->fence_excl, NULL);
}
EXPORT_SYMBOL(dma_resv_init);

/**
 * dma_resv_fini - destroys a reservation object
 * @obj: the reservation object
 */
void dma_resv_fini(struct dma_resv *obj)
{
	struct dma_resv_list *fobj;
	struct dma_fence *excl;

	/*
	 * This object should be dead and all references to it must have
	 * been released, so there is no need for RCU protection.
	 */
	excl = rcu_dereference_protected(obj->fence_excl, 1);
	if (excl)
		dma_fence_put(excl);

	fobj = rcu_dereference_protected(obj->fence, 1);
	dma_resv_list_free(fobj);
	ww_mutex_destroy(&obj->lock);
}
EXPORT_SYMBOL(dma_resv_fini);
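
/*
 * Illustrative sketch only (not part of this file's API): a hypothetical
 * buffer object embedding its reservation object. The dma_resv is
 * initialized once at creation time and destroyed again when the object
 * is freed, after all fences have been dropped or signaled.
 */
struct example_buffer_object {
	struct dma_resv resv;
	/* ... driver specific state ... */
};

static inline void example_buffer_object_init(struct example_buffer_object *bo)
{
	dma_resv_init(&bo->resv);
}

static inline void example_buffer_object_release(struct example_buffer_object *bo)
{
	/* drops the remaining fence references, no RCU protection needed */
	dma_resv_fini(&bo->resv);
}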

/**
 * dma_resv_reserve_shared - Reserve space to add shared fences to
 * a dma_resv.
 * @obj: reservation object
 * @num_fences: number of fences we want to add
 *
 * Should be called before dma_resv_add_shared_fence(). Must
 * be called with @obj locked through dma_resv_lock().
 *
 * Note that the preallocated slots need to be re-reserved if @obj is unlocked
 * at any time before calling dma_resv_add_shared_fence(). This is validated
 * when CONFIG_DEBUG_MUTEXES is enabled.
 *
 * RETURNS
 * Zero for success, or -errno
 */
int dma_resv_reserve_shared(struct dma_resv *obj, unsigned int num_fences)
{
	struct dma_resv_list *old, *new;
	unsigned int i, j, k, max;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old && old->shared_max) {
		if ((old->shared_count + num_fences) <= old->shared_max)
			return 0;
		max = max(old->shared_count + num_fences, old->shared_max * 2);
	} else {
		max = max(4ul, roundup_pow_of_two(num_fences));
	}

	new = dma_resv_list_alloc(max);
	if (!new)
		return -ENOMEM;

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	for (i = 0, j = 0, k = max; i < (old ? old->shared_count : 0); ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(old->shared[i],
						  dma_resv_held(obj));
		if (dma_fence_is_signaled(fence))
			RCU_INIT_POINTER(new->shared[--k], fence);
		else
			RCU_INIT_POINTER(new->shared[j++], fence);
	}
	new->shared_count = j;

	/*
	 * We are not changing the effective set of fences here so can
	 * merely update the pointer to the new array; both existing
	 * readers and new readers will see exactly the same set of
	 * active (unsignaled) shared fences. Individual fences and the
	 * old array are protected by RCU and so will not vanish under
	 * the gaze of the rcu_read_lock() readers.
	 */
	rcu_assign_pointer(obj->fence, new);

	if (!old)
		return 0;

	/* Drop the references to the signaled fences */
	for (i = k; i < max; ++i) {
		struct dma_fence *fence;

		fence = rcu_dereference_protected(new->shared[i],
						  dma_resv_held(obj));
		dma_fence_put(fence);
	}
	kfree_rcu(old, rcu);

	return 0;
}
EXPORT_SYMBOL(dma_resv_reserve_shared);

#ifdef CONFIG_DEBUG_MUTEXES
/**
 * dma_resv_reset_shared_max - reset shared fences for debugging
 * @obj: the dma_resv object to reset
 *
 * Reset the number of pre-reserved shared slots to test that drivers do
 * correct slot allocation using dma_resv_reserve_shared(). See also
 * &dma_resv_list.shared_max.
 */
void dma_resv_reset_shared_max(struct dma_resv *obj)
{
	struct dma_resv_list *fences = dma_resv_shared_list(obj);

	dma_resv_assert_held(obj);

	/* Test shared fence slot reservation */
	if (fences)
		fences->shared_max = fences->shared_count;
}
EXPORT_SYMBOL(dma_resv_reset_shared_max);
#endif

/**
 * dma_resv_add_shared_fence - Add a fence to a shared slot
 * @obj: the reservation object
 * @fence: the shared fence to add
 *
 * Add a fence to a shared slot. @obj must be locked with dma_resv_lock(), and
 * dma_resv_reserve_shared() must have been called beforehand.
 *
 * See also &dma_resv.fence for a discussion of the semantics.
 */
void dma_resv_add_shared_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_resv_list *fobj;
	struct dma_fence *old;
	unsigned int i, count;

	dma_fence_get(fence);

	dma_resv_assert_held(obj);

	fobj = dma_resv_shared_list(obj);
	count = fobj->shared_count;

	write_seqcount_begin(&obj->seq);

	for (i = 0; i < count; ++i) {

		old = rcu_dereference_protected(fobj->shared[i],
						dma_resv_held(obj));
		if (old->context == fence->context ||
		    dma_fence_is_signaled(old))
			goto replace;
	}

	BUG_ON(fobj->shared_count >= fobj->shared_max);
	old = NULL;
	count++;

replace:
	RCU_INIT_POINTER(fobj->shared[i], fence);
	/* pointer update must be visible before we extend the shared_count */
	smp_store_mb(fobj->shared_count, count);

	write_seqcount_end(&obj->seq);
	dma_fence_put(old);
}
EXPORT_SYMBOL(dma_resv_add_shared_fence);

/**
 * dma_resv_add_excl_fence - Add an exclusive fence.
 * @obj: the reservation object
 * @fence: the exclusive fence to add
 *
 * Add a fence to the exclusive slot. @obj must be locked with dma_resv_lock().
 * Note that this function replaces all fences attached to @obj, see also
 * &dma_resv.fence_excl for a discussion of the semantics.
 */
void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence)
{
	struct dma_fence *old_fence = dma_resv_excl_fence(obj);
	struct dma_resv_list *old;
	u32 i = 0;

	dma_resv_assert_held(obj);

	old = dma_resv_shared_list(obj);
	if (old)
		i = old->shared_count;

	if (fence)
		dma_fence_get(fence);

	write_seqcount_begin(&obj->seq);
	/* write_seqcount_begin provides the necessary memory barrier */
	RCU_INIT_POINTER(obj->fence_excl, fence);
	if (old)
		old->shared_count = 0;
	write_seqcount_end(&obj->seq);

	/* inplace update, no shared fences */
	while (i--)
		dma_fence_put(rcu_dereference_protected(old->shared[i],
							dma_resv_held(obj)));

	dma_fence_put(old_fence);
}
EXPORT_SYMBOL(dma_resv_add_excl_fence);
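
/*
 * Illustrative sketch only: publish a write operation as the new exclusive
 * fence of an object. The names are hypothetical; note that this replaces
 * all fences previously attached to the object.
 */
static inline int example_publish_write_fence(struct dma_resv *obj,
					      struct dma_fence *write_fence)
{
	int ret;

	ret = dma_resv_lock(obj, NULL);
	if (ret)
		return ret;

	dma_resv_add_excl_fence(obj, write_fence);
	dma_resv_unlock(obj);

	return 0;
}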

/**
 * dma_resv_iter_restart_unlocked - restart the unlocked iterator
 * @cursor: The dma_resv_iter object to restart
 *
 * Restart the unlocked iteration by initializing the cursor object.
 */
static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor)
{
	cursor->seq = read_seqcount_begin(&cursor->obj->seq);
	cursor->index = -1;
	cursor->shared_count = 0;
	if (cursor->all_fences) {
		cursor->fences = dma_resv_shared_list(cursor->obj);
		if (cursor->fences)
			cursor->shared_count = cursor->fences->shared_count;
	} else {
		cursor->fences = NULL;
	}
	cursor->is_restarted = true;
}

/**
 * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj
 * @cursor: cursor to record the current position
 *
 * Return all the fences in the dma_resv object which are not yet signaled.
 * The returned fence has an extra local reference so it will stay alive.
 * If a concurrent modification is detected the whole iteration is restarted.
 */
static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor)
{
	struct dma_resv *obj = cursor->obj;

	do {
		/* Drop the reference from the previous round */
		dma_fence_put(cursor->fence);

		if (cursor->index == -1) {
			cursor->fence = dma_resv_excl_fence(obj);
			cursor->index++;
			if (!cursor->fence)
				continue;

		} else if (!cursor->fences ||
			   cursor->index >= cursor->shared_count) {
			cursor->fence = NULL;
			break;

		} else {
			struct dma_resv_list *fences = cursor->fences;
			unsigned int idx = cursor->index++;

			cursor->fence = rcu_dereference(fences->shared[idx]);
		}
		cursor->fence = dma_fence_get_rcu(cursor->fence);
		if (!cursor->fence || !dma_fence_is_signaled(cursor->fence))
			break;
	} while (true);
}

/**
 * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Returns the first fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor)
{
	rcu_read_lock();
	do {
		dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_first_unlocked);

/**
 * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj.
 * @cursor: the cursor with the current position
 *
 * Returns the next fence from an unlocked dma_resv obj.
 */
struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor)
{
	bool restart;

	rcu_read_lock();
	cursor->is_restarted = false;
	restart = read_seqcount_retry(&cursor->obj->seq, cursor->seq);
	do {
		if (restart)
			dma_resv_iter_restart_unlocked(cursor);
		dma_resv_iter_walk_unlocked(cursor);
		restart = true;
	} while (read_seqcount_retry(&cursor->obj->seq, cursor->seq));
	rcu_read_unlock();

	return cursor->fence;
}
EXPORT_SYMBOL(dma_resv_iter_next_unlocked);
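
/*
 * Illustrative sketch only: count the unsignaled fences of an object
 * without taking the reservation lock. When a concurrent modification is
 * detected the iterator restarts, so the count is reset as well.
 */
static inline unsigned int example_count_busy_fences(struct dma_resv *obj)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	unsigned int count = 0;

	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		if (dma_resv_iter_is_restarted(&cursor))
			count = 0;
		++count;
	}
	dma_resv_iter_end(&cursor);

	return count;
}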

/**
 * dma_resv_iter_first - first fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the first fence in the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_first(struct dma_resv_iter *cursor)
{
	struct dma_fence *fence;

	dma_resv_assert_held(cursor->obj);

	cursor->index = 0;
	if (cursor->all_fences)
		cursor->fences = dma_resv_shared_list(cursor->obj);
	else
		cursor->fences = NULL;

	fence = dma_resv_excl_fence(cursor->obj);
	if (!fence)
		fence = dma_resv_iter_next(cursor);

	cursor->is_restarted = true;
	return fence;
}
EXPORT_SYMBOL_GPL(dma_resv_iter_first);

/**
 * dma_resv_iter_next - next fence from a locked dma_resv object
 * @cursor: cursor to record the current position
 *
 * Return the next fence from the dma_resv object while holding the
 * &dma_resv.lock.
 */
struct dma_fence *dma_resv_iter_next(struct dma_resv_iter *cursor)
{
	unsigned int idx;

	dma_resv_assert_held(cursor->obj);

	cursor->is_restarted = false;
	if (!cursor->fences || cursor->index >= cursor->fences->shared_count)
		return NULL;

	idx = cursor->index++;
	return rcu_dereference_protected(cursor->fences->shared[idx],
					 dma_resv_held(cursor->obj));
}
EXPORT_SYMBOL_GPL(dma_resv_iter_next);
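
/*
 * Illustrative sketch only: walk all fences of an object that the caller
 * already holds locked, e.g. to hand them to a driver specific scheduler.
 * The handle_fence callback is hypothetical.
 */
static inline void example_for_each_fence_locked(struct dma_resv *obj,
					void (*handle_fence)(struct dma_fence *))
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_assert_held(obj);

	/* iterate the exclusive fence first, then all shared fences */
	dma_resv_for_each_fence(&cursor, obj, true, fence)
		handle_fence(fence);
}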

/**
 * dma_resv_copy_fences - Copy all fences from src to dst.
 * @dst: the destination reservation object
 * @src: the source reservation object
 *
 * Copy all fences from @src to @dst. @dst must be locked with dma_resv_lock().
 */
int dma_resv_copy_fences(struct dma_resv *dst, struct dma_resv *src)
{
	struct dma_resv_iter cursor;
	struct dma_resv_list *list;
	struct dma_fence *f, *excl;

	dma_resv_assert_held(dst);

	list = NULL;
	excl = NULL;

	dma_resv_iter_begin(&cursor, src, true);
	dma_resv_for_each_fence_unlocked(&cursor, f) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			dma_resv_list_free(list);
			dma_fence_put(excl);

			if (cursor.shared_count) {
				list = dma_resv_list_alloc(cursor.shared_count);
				if (!list) {
					dma_resv_iter_end(&cursor);
					return -ENOMEM;
				}

				list->shared_count = 0;

			} else {
				list = NULL;
			}
			excl = NULL;
		}

		dma_fence_get(f);
		if (dma_resv_iter_is_exclusive(&cursor))
			excl = f;
		else
			RCU_INIT_POINTER(list->shared[list->shared_count++], f);
	}
	dma_resv_iter_end(&cursor);

	write_seqcount_begin(&dst->seq);
	excl = rcu_replace_pointer(dst->fence_excl, excl, dma_resv_held(dst));
	list = rcu_replace_pointer(dst->fence, list, dma_resv_held(dst));
	write_seqcount_end(&dst->seq);

	dma_resv_list_free(list);
	dma_fence_put(excl);

	return 0;
}
EXPORT_SYMBOL(dma_resv_copy_fences);

/**
 * dma_resv_get_fences - Get an object's shared and exclusive
 * fences without update side lock held
 * @obj: the reservation object
 * @fence_excl: the returned exclusive fence (or NULL)
 * @shared_count: the number of shared fences returned
 * @shared: the array of shared fence ptrs returned (array is krealloc'd to
 * the required size, and must be freed by caller)
 *
 * Retrieve all fences from the reservation object. If the pointer for the
 * exclusive fence is not specified the fence is put into the array of the
 * shared fences as well. Returns either zero or -ENOMEM.
 */
int dma_resv_get_fences(struct dma_resv *obj, struct dma_fence **fence_excl,
			unsigned int *shared_count, struct dma_fence ***shared)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	*shared_count = 0;
	*shared = NULL;

	if (fence_excl)
		*fence_excl = NULL;

	dma_resv_iter_begin(&cursor, obj, true);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		if (dma_resv_iter_is_restarted(&cursor)) {
			unsigned int count;

			while (*shared_count)
				dma_fence_put((*shared)[--(*shared_count)]);

			if (fence_excl)
				dma_fence_put(*fence_excl);

			count = cursor.shared_count;
			count += fence_excl ? 0 : 1;

			/* Re-allocate the array if needed */
			*shared = krealloc_array(*shared, count,
						 sizeof(void *),
						 GFP_KERNEL);
			if (count && !*shared) {
				dma_resv_iter_end(&cursor);
				return -ENOMEM;
			}
		}

		dma_fence_get(fence);
		if (dma_resv_iter_is_exclusive(&cursor) && fence_excl)
			*fence_excl = fence;
		else
			(*shared)[(*shared_count)++] = fence;
	}
	dma_resv_iter_end(&cursor);

	return 0;
}
EXPORT_SYMBOL_GPL(dma_resv_get_fences);
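
/*
 * Illustrative sketch only: snapshot all fences into a local array without
 * holding the reservation lock, use them, and drop the references again.
 * What "use" means here is left to the hypothetical caller.
 */
static inline int example_snapshot_fences(struct dma_resv *obj)
{
	struct dma_fence *excl, **shared;
	unsigned int i, count;
	int ret;

	ret = dma_resv_get_fences(obj, &excl, &count, &shared);
	if (ret)
		return ret;

	/* ... inspect excl and shared[0..count - 1] here ... */

	for (i = 0; i < count; ++i)
		dma_fence_put(shared[i]);
	dma_fence_put(excl);
	kfree(shared);

	return 0;
}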

/**
 * dma_resv_wait_timeout - Wait on a reservation object's
 * shared and/or exclusive fences.
 * @obj: the reservation object
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @intr: if true, do interruptible wait
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 * RETURNS
 * Returns -ERESTARTSYS if interrupted, 0 if the wait timed out, or
 * greater than zero on success.
 */
long dma_resv_wait_timeout(struct dma_resv *obj, bool wait_all, bool intr,
			   unsigned long timeout)
{
	long ret = timeout ? timeout : 1;
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, wait_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {

		ret = dma_fence_wait_timeout(fence, intr, ret);
		if (ret <= 0) {
			dma_resv_iter_end(&cursor);
			return ret;
		}
	}
	dma_resv_iter_end(&cursor);

	return ret;
}
EXPORT_SYMBOL_GPL(dma_resv_wait_timeout);
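
/*
 * Illustrative sketch only: interruptibly wait for an object to become
 * idle, with a caller supplied timeout in jiffies.
 */
static inline int example_wait_idle(struct dma_resv *obj, unsigned long timeout)
{
	long ret;

	/* wait on the exclusive and all shared fences */
	ret = dma_resv_wait_timeout(obj, true, true, timeout);
	if (ret < 0)
		return ret;	/* interrupted */
	if (ret == 0)
		return -EBUSY;	/* timed out */

	return 0;		/* everything signaled */
}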

/**
 * dma_resv_test_signaled - Test if a reservation object's fences have been
 * signaled.
 * @obj: the reservation object
 * @test_all: if true, test all fences, otherwise only test the exclusive
 * fence
 *
 * Callers are not required to hold specific locks, but may hold
 * dma_resv_lock() already.
 *
 * RETURNS
 *
 * True if all fences are signaled, else false.
 */
bool dma_resv_test_signaled(struct dma_resv *obj, bool test_all)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;

	dma_resv_iter_begin(&cursor, obj, test_all);
	dma_resv_for_each_fence_unlocked(&cursor, fence) {
		dma_resv_iter_end(&cursor);
		return false;
	}
	dma_resv_iter_end(&cursor);
	return true;
}
EXPORT_SYMBOL_GPL(dma_resv_test_signaled);
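
/*
 * Illustrative sketch only: non-blocking busy check, e.g. for an ioctl
 * that reports whether a buffer object is still in use.
 */
static inline bool example_is_idle(struct dma_resv *obj)
{
	/* test the exclusive and all shared fences without blocking */
	return dma_resv_test_signaled(obj, true);
}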

#if IS_ENABLED(CONFIG_LOCKDEP)
static int __init dma_resv_lockdep(void)
{
	struct mm_struct *mm = mm_alloc();
	struct ww_acquire_ctx ctx;
	struct dma_resv obj;
	struct address_space mapping;
	int ret;

	if (!mm)
		return -ENOMEM;

	dma_resv_init(&obj);
	address_space_init_once(&mapping);

	mmap_read_lock(mm);
	ww_acquire_init(&ctx, &reservation_ww_class);
	ret = dma_resv_lock(&obj, &ctx);
	if (ret == -EDEADLK)
		dma_resv_lock_slow(&obj, &ctx);
	fs_reclaim_acquire(GFP_KERNEL);
	/* for unmap_mapping_range on trylocked buffer objects in shrinkers */
	i_mmap_lock_write(&mapping);
	i_mmap_unlock_write(&mapping);
#ifdef CONFIG_MMU_NOTIFIER
	lock_map_acquire(&__mmu_notifier_invalidate_range_start_map);
	__dma_fence_might_wait();
	lock_map_release(&__mmu_notifier_invalidate_range_start_map);
#else
	__dma_fence_might_wait();
#endif
	fs_reclaim_release(GFP_KERNEL);
	ww_mutex_unlock(&obj.lock);
	ww_acquire_fini(&ctx);
	mmap_read_unlock(mm);

	mmput(mm);

	return 0;
}
subsys_initcall(dma_resv_lockdep);
#endif