/*
 * Copyright © 2008 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eric Anholt <eric@anholt.net>
 *
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
#include <linux/dma-buf-map.h>
#include <linux/mem_encrypt.h>
#include <linux/pagevec.h>

#include <drm/drm.h>
#include <drm/drm_device.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>
#include <drm/drm_vma_manager.h>

#include "drm_internal.h"

/** @file drm_gem.c
 *
 * This file provides some of the base ioctls and library routines for
 * the graphics memory manager implemented by each device driver.
 *
 * Because various devices have different requirements in terms of
 * synchronization and migration strategies, implementing that is left up to
 * the driver, and all that the general API provides should be generic --
 * allocating objects, reading/writing data with the cpu, freeing objects.
 * Even there, platform-dependent optimizations for reading/writing data with
 * the CPU mean we'll likely hook those out to driver-specific calls. However,
 * the DRI2 implementation wants to have at least allocate/mmap be generic.
 *
 * The goal was to have swap-backed object allocation managed through
 * struct file. However, file descriptors as handles to a struct file have
 * two major failings:
 * - Process limits prevent more than 1024 or so being used at a time by
 *   default.
 * - Inability to allocate high fds will aggravate the X Server's select()
 *   handling, and likely that of many GL client applications as well.
 *
 * This led to a plan of using our own integer IDs (called handles, following
 * DRM terminology) to mimic fds, and implement the fd syscalls we need as
 * ioctls. The objects themselves will still include the struct file so
 * that we can transition to fds if the required kernel infrastructure shows
 * up at a later date, and as our interface with shmfs for memory allocation.
 */

static void
drm_gem_init_release(struct drm_device *dev, void *ptr)
{
	drm_vma_offset_manager_destroy(dev->vma_offset_manager);
}

/**
 * drm_gem_init - Initialize the GEM device fields
 * @dev: drm_device structure to initialize
 */
int
drm_gem_init(struct drm_device *dev)
{
	struct drm_vma_offset_manager *vma_offset_manager;

	mutex_init(&dev->object_name_lock);
	idr_init_base(&dev->object_name_idr, 1);

	vma_offset_manager = drmm_kzalloc(dev, sizeof(*vma_offset_manager),
					  GFP_KERNEL);
	if (!vma_offset_manager) {
		DRM_ERROR("out of memory\n");
		return -ENOMEM;
	}

	dev->vma_offset_manager = vma_offset_manager;
	drm_vma_offset_manager_init(vma_offset_manager,
				    DRM_FILE_PAGE_OFFSET_START,
				    DRM_FILE_PAGE_OFFSET_SIZE);

	return drmm_add_action(dev, drm_gem_init_release, NULL);
}

/**
 * drm_gem_object_init - initialize an allocated shmem-backed GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * shmfs backing store.
 */
int drm_gem_object_init(struct drm_device *dev,
			struct drm_gem_object *obj, size_t size)
{
	struct file *filp;

	drm_gem_private_object_init(dev, obj, size);

	filp = shmem_file_setup("drm mm object", size, VM_NORESERVE);
	if (IS_ERR(filp))
		return PTR_ERR(filp);

	obj->filp = filp;

	return 0;
}
EXPORT_SYMBOL(drm_gem_object_init);
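
/*
 * A minimal usage sketch (not part of this file): a driver embedding
 * struct drm_gem_object allocates its wrapper object and asks GEM for
 * page-aligned shmem backing. The my_bo type and my_bo_create() helper
 * are hypothetical names for illustration only.
 *
 *	struct my_bo {
 *		struct drm_gem_object base;
 *	};
 *
 *	static struct my_bo *my_bo_create(struct drm_device *dev, size_t size)
 *	{
 *		struct my_bo *bo;
 *		int ret;
 *
 *		bo = kzalloc(sizeof(*bo), GFP_KERNEL);
 *		if (!bo)
 *			return ERR_PTR(-ENOMEM);
 *
 *		// shmem backing requires a page-aligned size
 *		ret = drm_gem_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		if (ret) {
 *			kfree(bo);
 *			return ERR_PTR(ret);
 *		}
 *		return bo;
 *	}
 */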

/**
 * drm_gem_private_object_init - initialize an allocated private GEM object
 * @dev: drm_device the object should be initialized for
 * @obj: drm_gem_object to initialize
 * @size: object size
 *
 * Initialize an already allocated GEM object of the specified size with
 * no GEM provided backing store. Instead the caller is responsible for
 * backing the object and handling it.
 */
void drm_gem_private_object_init(struct drm_device *dev,
				 struct drm_gem_object *obj, size_t size)
{
	BUG_ON((size & (PAGE_SIZE - 1)) != 0);

	obj->dev = dev;
	obj->filp = NULL;

	kref_init(&obj->refcount);
	obj->handle_count = 0;
	obj->size = size;
	dma_resv_init(&obj->_resv);
	if (!obj->resv)
		obj->resv = &obj->_resv;

	drm_vma_node_reset(&obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_private_object_init);
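
/*
 * Sketch of a driver-managed object with no shmem backing, e.g. VRAM-backed;
 * the driver provides and manages the storage itself. my_vram_bo and its
 * vram_offset field are hypothetical.
 *
 *	struct my_vram_bo {
 *		struct drm_gem_object base;
 *		u64 vram_offset;
 *	};
 *
 *	static void my_vram_bo_init(struct drm_device *dev,
 *				    struct my_vram_bo *bo, size_t size)
 *	{
 *		// size must be page-aligned, see the BUG_ON above
 *		drm_gem_private_object_init(dev, &bo->base, PAGE_ALIGN(size));
 *		bo->vram_offset = 0;	// driver allocates real backing here
 *	}
 */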

static void
drm_gem_remove_prime_handles(struct drm_gem_object *obj, struct drm_file *filp)
{
	/*
	 * Note: obj->dma_buf can't disappear as long as we still hold a
	 * handle reference in obj->handle_count.
	 */
	mutex_lock(&filp->prime.lock);
	if (obj->dma_buf) {
		drm_prime_remove_buf_handle_locked(&filp->prime,
						   obj->dma_buf);
	}
	mutex_unlock(&filp->prime.lock);
}

/**
 * drm_gem_object_handle_free - release resources bound to userspace handles
 * @obj: GEM object to clean up.
 *
 * Called after the last handle to the object has been closed.
 *
 * Removes any name for the object. Note that this must be
 * called before drm_gem_object_free() or we'll be touching
 * freed memory.
 */
static void drm_gem_object_handle_free(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	/* Remove any name for this object */
	if (obj->name) {
		idr_remove(&dev->object_name_idr, obj->name);
		obj->name = 0;
	}
}

static void drm_gem_object_exported_dma_buf_free(struct drm_gem_object *obj)
{
	/* Unbreak the reference cycle if we have an exported dma_buf. */
	if (obj->dma_buf) {
		dma_buf_put(obj->dma_buf);
		obj->dma_buf = NULL;
	}
}

static void
drm_gem_object_handle_put_unlocked(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	bool final = false;

	if (WARN_ON(READ_ONCE(obj->handle_count) == 0))
		return;

	/*
	 * Must bump handle count first as this may be the last
	 * ref, in which case the object would disappear before we
	 * checked for a name
	 */

	mutex_lock(&dev->object_name_lock);
	if (--obj->handle_count == 0) {
		drm_gem_object_handle_free(obj);
		drm_gem_object_exported_dma_buf_free(obj);
		final = true;
	}
	mutex_unlock(&dev->object_name_lock);

	if (final)
		drm_gem_object_put(obj);
}

/*
 * Called at device or object close to release the file's
 * handle references on objects.
 */
static int
drm_gem_object_release_handle(int id, void *ptr, void *data)
{
	struct drm_file *file_priv = data;
	struct drm_gem_object *obj = ptr;

	if (obj->funcs->close)
		obj->funcs->close(obj, file_priv);

	drm_gem_remove_prime_handles(obj, file_priv);
	drm_vma_node_revoke(&obj->vma_node, file_priv);

	drm_gem_object_handle_put_unlocked(obj);

	return 0;
}

/**
 * drm_gem_handle_delete - deletes the given file-private handle
 * @filp: drm file-private structure to use for the handle look up
 * @handle: userspace handle to delete
 *
 * Removes the GEM handle from the @filp lookup table which has been added with
 * drm_gem_handle_create(). If this is the last handle also cleans up linked
 * resources like GEM names.
 */
int
drm_gem_handle_delete(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	/* Check if we currently have a reference on the object */
	obj = idr_replace(&filp->object_idr, NULL, handle);
	spin_unlock(&filp->table_lock);
	if (IS_ERR_OR_NULL(obj))
		return -EINVAL;

	/* Release driver's reference and decrement refcount. */
	drm_gem_object_release_handle(handle, obj, filp);

	/* And finally make the handle available for future allocations. */
	spin_lock(&filp->table_lock);
	idr_remove(&filp->object_idr, handle);
	spin_unlock(&filp->table_lock);

	return 0;
}
EXPORT_SYMBOL(drm_gem_handle_delete);

/**
 * drm_gem_dumb_map_offset - return the fake mmap offset for a gem object
 * @file: drm file-private structure containing the gem object
 * @dev: corresponding drm_device
 * @handle: gem object handle
 * @offset: return location for the fake mmap offset
 *
 * This implements the &drm_driver.dumb_map_offset kms driver callback for
 * drivers which use gem to manage their backing storage.
 *
 * Returns:
 * 0 on success or a negative error code on failure.
 */
int drm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
			    u32 handle, u64 *offset)
{
	struct drm_gem_object *obj;
	int ret;

	obj = drm_gem_object_lookup(file, handle);
	if (!obj)
		return -ENOENT;

	/* Don't allow imported objects to be mapped */
	if (obj->import_attach) {
		ret = -EINVAL;
		goto out;
	}

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto out;

	*offset = drm_vma_node_offset_addr(&obj->vma_node);
out:
	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL_GPL(drm_gem_dumb_map_offset);
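
/*
 * Sketch of wiring this helper into a driver's struct drm_driver, as
 * shmem-based drivers commonly do; the other fields are elided.
 *
 *	static const struct drm_driver my_driver = {
 *		.driver_features = DRIVER_GEM,
 *		.dumb_map_offset = drm_gem_dumb_map_offset,
 *		// ...
 *	};
 */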

int drm_gem_dumb_destroy(struct drm_file *file,
			 struct drm_device *dev,
			 u32 handle)
{
	return drm_gem_handle_delete(file, handle);
}

/**
 * drm_gem_handle_create_tail - internal function to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the &drm_device.object_name_lock to be held already and will
 * drop it before returning. Used to avoid races in establishing new handles
 * when importing an object from either a flink name or a dma-buf.
 *
 * Handles must be released again through drm_gem_handle_delete(). This is done
 * when userspace closes @file_priv for all attached handles, or through the
 * GEM_CLOSE ioctl for individual handles.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	u32 handle;
	int ret;

	WARN_ON(!mutex_is_locked(&dev->object_name_lock));
	if (obj->handle_count++ == 0)
		drm_gem_object_get(obj);

	/*
	 * Get the user-visible handle using idr. Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();

	mutex_unlock(&dev->object_name_lock);
	if (ret < 0)
		goto err_unref;

	handle = ret;

	ret = drm_vma_node_allow(&obj->vma_node, file_priv);
	if (ret)
		goto err_remove;

	if (obj->funcs->open) {
		ret = obj->funcs->open(obj, file_priv);
		if (ret)
			goto err_revoke;
	}

	*handlep = handle;
	return 0;

err_revoke:
	drm_vma_node_revoke(&obj->vma_node, file_priv);
err_remove:
	spin_lock(&file_priv->table_lock);
	idr_remove(&file_priv->object_idr, handle);
	spin_unlock(&file_priv->table_lock);
err_unref:
	drm_gem_object_handle_put_unlocked(obj);
	return ret;
}

/**
 * drm_gem_handle_create - create a gem handle for an object
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * Create a handle for this object. This adds a handle reference to the object,
 * which includes a regular reference count. Callers will likely want to
 * dereference the object afterwards.
 *
 * Since this publishes @obj to userspace it must be fully set up by this point,
 * drivers must call this last in their buffer object creation callbacks.
 */
int drm_gem_handle_create(struct drm_file *file_priv,
			  struct drm_gem_object *obj,
			  u32 *handlep)
{
	mutex_lock(&obj->dev->object_name_lock);

	return drm_gem_handle_create_tail(file_priv, obj, handlep);
}
EXPORT_SYMBOL(drm_gem_handle_create);
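
/*
 * Sketch of publishing a new object to userspace from a create ioctl,
 * reusing the hypothetical my_bo_create() from the sketch above. The handle
 * now keeps the object alive, so the local creation reference is dropped
 * unconditionally afterwards.
 *
 *	struct my_bo *bo = my_bo_create(dev, args->size);
 *	u32 handle;
 *	int ret;
 *
 *	if (IS_ERR(bo))
 *		return PTR_ERR(bo);
 *
 *	ret = drm_gem_handle_create(file_priv, &bo->base, &handle);
 *	drm_gem_object_put(&bo->base);	// handle owns a reference now
 *	if (ret)
 *		return ret;
 *
 *	args->handle = handle;
 *	return 0;
 */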

/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 *
 * Note that drm_gem_object_release() already calls this function, so drivers
 * don't have to take care of releasing the mmap offset themselves when freeing
 * the GEM object.
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;

	drm_vma_offset_remove(dev->vma_offset_manager, &obj->vma_node);
}
EXPORT_SYMBOL(drm_gem_free_mmap_offset);

/**
 * drm_gem_create_mmap_offset_size - create a fake mmap offset for an object
 * @obj: obj in question
 * @size: the virtual size
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj, in cases where
 * the virtual size differs from the physical size (ie. &drm_gem_object.size).
 * Otherwise just use drm_gem_create_mmap_offset().
 *
 * This function is idempotent and handles an already allocated mmap offset
 * transparently. Drivers do not need to check for this case.
 */
int
drm_gem_create_mmap_offset_size(struct drm_gem_object *obj, size_t size)
{
	struct drm_device *dev = obj->dev;

	return drm_vma_offset_add(dev->vma_offset_manager, &obj->vma_node,
				  size / PAGE_SIZE);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset_size);

/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call. The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 *
 * Drivers can call drm_gem_free_mmap_offset() before freeing @obj to release
 * the fake offset again.
 */
int drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	return drm_gem_create_mmap_offset_size(obj, obj->size);
}
EXPORT_SYMBOL(drm_gem_create_mmap_offset);
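
/*
 * The typical pattern, as used by drm_gem_dumb_map_offset() above: allocate
 * the fake offset once, then hand the resulting address to userspace for
 * mmap(2).
 *
 *	ret = drm_gem_create_mmap_offset(obj);
 *	if (ret)
 *		return ret;
 *
 *	*offset = drm_vma_node_offset_addr(&obj->vma_node);
 */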

/*
 * Move pages to appropriate lru and release the pagevec, decrementing the
 * ref count of those pages.
 */
static void drm_gem_check_release_pagevec(struct pagevec *pvec)
{
	check_move_unevictable_pages(pvec);
	__pagevec_release(pvec);
	cond_resched();
}

/**
 * drm_gem_get_pages - helper to allocate backing pages for a GEM object
 * from shmem
 * @obj: obj in question
 *
 * This reads the page-array of the shmem-backing storage of the given gem
 * object. An array of pages is returned. If a page is not allocated or
 * swapped-out, this will allocate/swap-in the required pages. Note that the
 * whole object is covered by the page-array and pinned in memory.
 *
 * Use drm_gem_put_pages() to release the array and unpin all pages.
 *
 * This uses the GFP-mask set on the shmem-mapping (see mapping_set_gfp_mask()).
 * If you require other GFP-masks, you have to do those allocations yourself.
 *
 * Note that you are not allowed to change gfp-zones during runtime. That is,
 * shmem_read_mapping_page_gfp() must be called with the same gfp_zone(gfp) as
 * set during initialization. If you have special zone constraints, set them
 * after drm_gem_object_init() via mapping_set_gfp_mask(). shmem-core takes care
 * to keep pages in the required zone during swap-in.
 *
 * This function is only valid on objects initialized with
 * drm_gem_object_init(), but not for those initialized with
 * drm_gem_private_object_init() only.
 */
struct page **drm_gem_get_pages(struct drm_gem_object *obj)
{
	struct address_space *mapping;
	struct page *p, **pages;
	struct pagevec pvec;
	int i, npages;

	if (WARN_ON(!obj->filp))
		return ERR_PTR(-EINVAL);

	/* This is the shared memory object that backs the GEM resource */
	mapping = obj->filp->f_mapping;

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (pages == NULL)
		return ERR_PTR(-ENOMEM);

	mapping_set_unevictable(mapping);

	for (i = 0; i < npages; i++) {
		p = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(p))
			goto fail;
		pages[i] = p;

		/* Make sure shmem keeps __GFP_DMA32 allocated pages in the
		 * correct region during swapin. Note that this requires
		 * __GFP_DMA32 to be set in mapping_gfp_mask(inode->i_mapping)
		 * so shmem can relocate pages during swapin if required.
		 */
		BUG_ON(mapping_gfp_constraint(mapping, __GFP_DMA32) &&
		       (page_to_pfn(p) >= 0x00100000UL));
	}

	return pages;

fail:
	mapping_clear_unevictable(mapping);
	pagevec_init(&pvec);
	while (i--) {
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
	return ERR_CAST(p);
}
EXPORT_SYMBOL(drm_gem_get_pages);

/**
 * drm_gem_put_pages - helper to free backing pages for a GEM object
 * @obj: obj in question
 * @pages: pages to free
 * @dirty: if true, pages will be marked as dirty
 * @accessed: if true, the pages will be marked as accessed
 */
void drm_gem_put_pages(struct drm_gem_object *obj, struct page **pages,
		       bool dirty, bool accessed)
{
	int i, npages;
	struct address_space *mapping;
	struct pagevec pvec;

	mapping = file_inode(obj->filp)->i_mapping;
	mapping_clear_unevictable(mapping);

	/* We already BUG_ON() for non-page-aligned sizes in
	 * drm_gem_object_init(), so we should never hit this unless
	 * driver author is doing something really wrong:
	 */
	WARN_ON((obj->size & (PAGE_SIZE - 1)) != 0);

	npages = obj->size >> PAGE_SHIFT;

	pagevec_init(&pvec);
	for (i = 0; i < npages; i++) {
		if (!pages[i])
			continue;

		if (dirty)
			set_page_dirty(pages[i]);

		if (accessed)
			mark_page_accessed(pages[i]);

		/* Undo the reference we took when populating the table */
		if (!pagevec_add(&pvec, pages[i]))
			drm_gem_check_release_pagevec(&pvec);
	}
	if (pagevec_count(&pvec))
		drm_gem_check_release_pagevec(&pvec);

	kvfree(pages);
}
EXPORT_SYMBOL(drm_gem_put_pages);
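
/*
 * Sketch of the expected pairing, e.g. around building a scatter/gather
 * table for DMA; bo is the hypothetical wrapper from the earlier sketches.
 *
 *	struct page **pages;
 *
 *	pages = drm_gem_get_pages(&bo->base);
 *	if (IS_ERR(pages))
 *		return PTR_ERR(pages);
 *
 *	// ... map the pages for DMA and run the job ...
 *
 *	drm_gem_put_pages(&bo->base, pages, true, true);
 */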

static int objects_lookup(struct drm_file *filp, u32 *handle, int count,
			  struct drm_gem_object **objs)
{
	int i, ret = 0;
	struct drm_gem_object *obj;

	spin_lock(&filp->table_lock);

	for (i = 0; i < count; i++) {
		/* Check if we currently have a reference on the object */
		obj = idr_find(&filp->object_idr, handle[i]);
		if (!obj) {
			ret = -ENOENT;
			break;
		}
		drm_gem_object_get(obj);
		objs[i] = obj;
	}
	spin_unlock(&filp->table_lock);

	return ret;
}

/**
 * drm_gem_objects_lookup - look up GEM objects from an array of handles
 * @filp: DRM file private data
 * @bo_handles: user pointer to array of userspace handles
 * @count: size of handle array
 * @objs_out: returned pointer to array of drm_gem_object pointers
 *
 * Takes an array of userspace handles and returns a newly allocated array of
 * GEM objects.
 *
 * For a single handle lookup, use drm_gem_object_lookup().
 *
 * Returns:
 *
 * @objs_out filled in with GEM object pointers. Returned GEM objects need to
 * be released with drm_gem_object_put(). -ENOENT is returned on a lookup
 * failure. 0 is returned on success.
 */
int drm_gem_objects_lookup(struct drm_file *filp, void __user *bo_handles,
			   int count, struct drm_gem_object ***objs_out)
{
	int ret;
	u32 *handles;
	struct drm_gem_object **objs;

	if (!count)
		return 0;

	objs = kvmalloc_array(count, sizeof(struct drm_gem_object *),
			      GFP_KERNEL | __GFP_ZERO);
	if (!objs)
		return -ENOMEM;

	*objs_out = objs;

	handles = kvmalloc_array(count, sizeof(u32), GFP_KERNEL);
	if (!handles) {
		ret = -ENOMEM;
		goto out;
	}

	if (copy_from_user(handles, bo_handles, count * sizeof(u32))) {
		ret = -EFAULT;
		DRM_DEBUG("Failed to copy in GEM handles\n");
		goto out;
	}

	ret = objects_lookup(filp, handles, count, objs);
out:
	kvfree(handles);
	return ret;
}
EXPORT_SYMBOL(drm_gem_objects_lookup);
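
/*
 * Sketch of a submit ioctl resolving all BOs referenced by a job; the
 * args->bo_handles and args->bo_count fields are hypothetical.
 *
 *	struct drm_gem_object **objs = NULL;
 *	int i, ret;
 *
 *	ret = drm_gem_objects_lookup(file_priv,
 *				     u64_to_user_ptr(args->bo_handles),
 *				     args->bo_count, &objs);
 *	if (ret)
 *		return ret;
 *
 *	// ... use the objects ...
 *
 *	for (i = 0; i < args->bo_count; i++)
 *		drm_gem_object_put(objs[i]);
 *	kvfree(objs);
 */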

/**
 * drm_gem_object_lookup - look up a GEM object from its handle
 * @filp: DRM file private data
 * @handle: userspace handle
 *
 * Returns:
 *
 * A reference to the object named by the handle if such exists on @filp, NULL
 * otherwise.
 *
 * If looking up an array of handles, use drm_gem_objects_lookup().
 */
struct drm_gem_object *
drm_gem_object_lookup(struct drm_file *filp, u32 handle)
{
	struct drm_gem_object *obj = NULL;

	objects_lookup(filp, &handle, 1, &obj);
	return obj;
}
EXPORT_SYMBOL(drm_gem_object_lookup);

/**
 * drm_gem_dma_resv_wait - Wait on GEM object's reservation's objects
 * shared and/or exclusive fences.
 * @filep: DRM file private data
 * @handle: userspace handle
 * @wait_all: if true, wait on all fences, else wait on just exclusive fence
 * @timeout: timeout value in jiffies or zero to return immediately
 *
 * Returns:
 *
 * Returns -ERESTARTSYS if interrupted, -ETIME if the wait timed out, or
 * 0 on success (matching the conversion of dma_resv_wait_timeout()'s
 * return value below).
 */
long drm_gem_dma_resv_wait(struct drm_file *filep, u32 handle,
			   bool wait_all, unsigned long timeout)
{
	long ret;
	struct drm_gem_object *obj;

	obj = drm_gem_object_lookup(filep, handle);
	if (!obj) {
		DRM_DEBUG("Failed to look up GEM BO %d\n", handle);
		return -EINVAL;
	}

	ret = dma_resv_wait_timeout(obj->resv, wait_all, true, timeout);
	if (ret == 0)
		ret = -ETIME;
	else if (ret > 0)
		ret = 0;

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_dma_resv_wait);
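
/*
 * Sketch of a driver wait ioctl built on this helper; the args fields are
 * hypothetical and assume a timeout expressed as an absolute time in
 * nanoseconds, converted with drm_timeout_abs_to_jiffies().
 *
 *	unsigned long timeout = drm_timeout_abs_to_jiffies(args->timeout_ns);
 *
 *	return drm_gem_dma_resv_wait(file_priv, args->handle,
 *				     args->wait_all, timeout);
 */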

/**
 * drm_gem_close_ioctl - implementation of the GEM_CLOSE ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	ret = drm_gem_handle_delete(file_priv, args->handle);

	return ret;
}

/**
 * drm_gem_flink_ioctl - implementation of the GEM_FLINK ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Create a global name for an object, returning the name.
 *
 * Note that the name does not hold a reference; when the object
 * is freed, the name goes away.
 */
int
drm_gem_flink_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	struct drm_gem_flink *args = data;
	struct drm_gem_object *obj;
	int ret;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (obj == NULL)
		return -ENOENT;

	mutex_lock(&dev->object_name_lock);
	/* prevent races with concurrent gem_close. */
	if (obj->handle_count == 0) {
		ret = -ENOENT;
		goto err;
	}

	if (!obj->name) {
		ret = idr_alloc(&dev->object_name_idr, obj, 1, 0, GFP_KERNEL);
		if (ret < 0)
			goto err;

		obj->name = ret;
	}

	args->name = (uint64_t) obj->name;
	ret = 0;

err:
	mutex_unlock(&dev->object_name_lock);
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open_ioctl - implementation of the GEM_OPEN ioctl
 * @dev: drm_device
 * @data: ioctl data
 * @file_priv: drm file-private structure
 *
 * Open an object using the global name, returning a handle and the size.
 *
 * This handle (of course) holds a reference to the object, so the object
 * will not go away until the handle is deleted.
 */
int
drm_gem_open_ioctl(struct drm_device *dev, void *data,
		   struct drm_file *file_priv)
{
	struct drm_gem_open *args = data;
	struct drm_gem_object *obj;
	int ret;
	u32 handle;

	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return -EOPNOTSUPP;

	mutex_lock(&dev->object_name_lock);
	obj = idr_find(&dev->object_name_idr, (int) args->name);
	if (obj) {
		drm_gem_object_get(obj);
	} else {
		mutex_unlock(&dev->object_name_lock);
		return -ENOENT;
	}

	/* drm_gem_handle_create_tail unlocks dev->object_name_lock. */
	ret = drm_gem_handle_create_tail(file_priv, obj, &handle);
	if (ret)
		goto err;

	args->handle = handle;
	args->size = obj->size;

err:
	drm_gem_object_put(obj);
	return ret;
}

/**
 * drm_gem_open - initializes GEM file-private structures at devnode open time
 * @dev: drm_device which is being opened by userspace
 * @file_private: drm file-private structure to set up
 *
 * Called at device open time, sets up the structure for handling refcounting
 * of mm objects.
 */
void
drm_gem_open(struct drm_device *dev, struct drm_file *file_private)
{
	idr_init_base(&file_private->object_idr, 1);
	spin_lock_init(&file_private->table_lock);
}

/**
 * drm_gem_release - release file-private GEM resources
 * @dev: drm_device which is being closed by userspace
 * @file_private: drm file-private structure to clean up
 *
 * Called at close time when the filp is going away.
 *
 * Releases any remaining references on objects by this filp.
 */
void
drm_gem_release(struct drm_device *dev, struct drm_file *file_private)
{
	idr_for_each(&file_private->object_idr,
		     &drm_gem_object_release_handle, file_private);
	idr_destroy(&file_private->object_idr);
}

/**
 * drm_gem_object_release - release GEM buffer object resources
 * @obj: GEM buffer object
 *
 * This releases any structures and resources used by @obj and is the inverse of
 * drm_gem_object_init().
 */
void
drm_gem_object_release(struct drm_gem_object *obj)
{
	WARN_ON(obj->dma_buf);

	if (obj->filp)
		fput(obj->filp);

	dma_resv_fini(&obj->_resv);
	drm_gem_free_mmap_offset(obj);
}
EXPORT_SYMBOL(drm_gem_object_release);

/**
 * drm_gem_object_free - free a GEM object
 * @kref: kref of the object to free
 *
 * Called after the last reference to the object has been lost.
 *
 * Frees the object.
 */
void
drm_gem_object_free(struct kref *kref)
{
	struct drm_gem_object *obj =
		container_of(kref, struct drm_gem_object, refcount);

	if (WARN_ON(!obj->funcs->free))
		return;

	obj->funcs->free(obj);
}
EXPORT_SYMBOL(drm_gem_object_free);

/**
 * drm_gem_vm_open - vma->ops->open implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct open() callback for GEM
 * drivers. This must be used together with drm_gem_vm_close().
 */
void drm_gem_vm_open(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_get(obj);
}
EXPORT_SYMBOL(drm_gem_vm_open);

/**
 * drm_gem_vm_close - vma->ops->close implementation for GEM
 * @vma: VM area structure
 *
 * This function implements the #vm_operations_struct close() callback for GEM
 * drivers. This must be used together with drm_gem_vm_open().
 */
void drm_gem_vm_close(struct vm_area_struct *vma)
{
	struct drm_gem_object *obj = vma->vm_private_data;

	drm_gem_object_put(obj);
}
EXPORT_SYMBOL(drm_gem_vm_close);

/**
 * drm_gem_mmap_obj - memory map a GEM object
 * @obj: the GEM object to map
 * @obj_size: the object size to be mapped, in bytes
 * @vma: VMA for the area to be mapped
 *
 * Set up the VMA to prepare mapping of the GEM object using the GEM object's
 * vm_ops. Depending on their requirements, GEM objects can either
 * provide a fault handler in their vm_ops (in which case any accesses to
 * the object will be trapped, to perform migration, GTT binding, surface
 * register allocation, or performance monitoring), or mmap the buffer memory
 * synchronously after calling drm_gem_mmap_obj.
 *
 * This function is mainly intended to implement the DMABUF mmap operation, when
 * the GEM object is not looked up based on its fake offset. To implement the
 * DRM mmap operation, drivers should use the drm_gem_mmap() function.
 *
 * drm_gem_mmap_obj() assumes the user is granted access to the buffer while
 * drm_gem_mmap() prevents unprivileged users from mapping random objects. So
 * callers must verify access restrictions before calling this helper.
 *
 * Returns 0 on success or -EINVAL if the object size is smaller than the VMA
 * size, or if no vm_ops are provided.
 */
int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
		     struct vm_area_struct *vma)
{
	int ret;

	/* Check for valid size. */
	if (obj_size < vma->vm_end - vma->vm_start)
		return -EINVAL;

	/* Take a ref for this mapping of the object, so that the fault
	 * handler can dereference the mmap offset's pointer to the object.
	 * This reference is cleaned up by the corresponding vm_close
	 * (which should happen whether the vma was created by this call, or
	 * by a vm_open due to mremap or partial unmap or whatever).
	 */
	drm_gem_object_get(obj);

	vma->vm_private_data = obj;
	vma->vm_ops = obj->funcs->vm_ops;

	if (obj->funcs->mmap) {
		ret = obj->funcs->mmap(obj, vma);
		if (ret)
			goto err_drm_gem_object_put;
		WARN_ON(!(vma->vm_flags & VM_DONTEXPAND));
	} else {
		if (!vma->vm_ops) {
			ret = -EINVAL;
			goto err_drm_gem_object_put;
		}

		vma->vm_flags |= VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP;
		vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
		vma->vm_page_prot = pgprot_decrypted(vma->vm_page_prot);
	}

	return 0;

err_drm_gem_object_put:
	drm_gem_object_put(obj);
	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap_obj);

/**
 * drm_gem_mmap - memory map routine for GEM objects
 * @filp: DRM file pointer
 * @vma: VMA for the area to be mapped
 *
 * If a driver supports GEM object mapping, mmap calls on the DRM file
 * descriptor will end up here.
 *
 * Look up the GEM object based on the offset passed in (vma->vm_pgoff will
 * contain the fake offset we created when the GTT map ioctl was called on
 * the object) and map it with a call to drm_gem_mmap_obj().
 *
 * If the caller is not granted access to the buffer object, the mmap will fail
 * with EACCES. Please see the vma manager for more information.
 */
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct drm_file *priv = filp->private_data;
	struct drm_device *dev = priv->minor->dev;
	struct drm_gem_object *obj = NULL;
	struct drm_vma_offset_node *node;
	int ret;

	if (drm_dev_is_unplugged(dev))
		return -ENODEV;

	drm_vma_offset_lock_lookup(dev->vma_offset_manager);
	node = drm_vma_offset_exact_lookup_locked(dev->vma_offset_manager,
						  vma->vm_pgoff,
						  vma_pages(vma));
	if (likely(node)) {
		obj = container_of(node, struct drm_gem_object, vma_node);
		/*
		 * When the object is being freed, after it hits 0-refcnt it
		 * proceeds to tear down the object. In the process it will
		 * attempt to remove the VMA offset and so acquire this
		 * mgr->vm_lock. Therefore if we find an object with a 0-refcnt
		 * that matches our range, we know it is in the process of being
		 * destroyed and will be freed as soon as we release the lock -
		 * so we have to check for the 0-refcnted object and treat it as
		 * invalid.
		 */
		if (!kref_get_unless_zero(&obj->refcount))
			obj = NULL;
	}
	drm_vma_offset_unlock_lookup(dev->vma_offset_manager);

	if (!obj)
		return -EINVAL;

	if (!drm_vma_node_is_allowed(node, priv)) {
		drm_gem_object_put(obj);
		return -EACCES;
	}

	ret = drm_gem_mmap_obj(obj, drm_vma_node_size(node) << PAGE_SHIFT,
			       vma);

	drm_gem_object_put(obj);

	return ret;
}
EXPORT_SYMBOL(drm_gem_mmap);
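
/*
 * Sketch of the usual wiring: a driver points its file_operations.mmap at
 * this function (the DEFINE_DRM_GEM_FOPS() macro in drm_gem.h does exactly
 * this, along with the other standard entry points).
 *
 *	static const struct file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= drm_open,
 *		.release	= drm_release,
 *		.unlocked_ioctl	= drm_ioctl,
 *		.mmap		= drm_gem_mmap,
 *		// ...
 *	};
 */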

void drm_gem_print_info(struct drm_printer *p, unsigned int indent,
			const struct drm_gem_object *obj)
{
	drm_printf_indent(p, indent, "name=%d\n", obj->name);
	drm_printf_indent(p, indent, "refcount=%u\n",
			  kref_read(&obj->refcount));
	drm_printf_indent(p, indent, "start=%08lx\n",
			  drm_vma_node_start(&obj->vma_node));
	drm_printf_indent(p, indent, "size=%zu\n", obj->size);
	drm_printf_indent(p, indent, "imported=%s\n",
			  obj->import_attach ? "yes" : "no");

	if (obj->funcs->print_info)
		obj->funcs->print_info(p, indent, obj);
}

int drm_gem_pin(struct drm_gem_object *obj)
{
	if (obj->funcs->pin)
		return obj->funcs->pin(obj);
	else
		return 0;
}

void drm_gem_unpin(struct drm_gem_object *obj)
{
	if (obj->funcs->unpin)
		obj->funcs->unpin(obj);
}

int drm_gem_vmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	int ret;

	if (!obj->funcs->vmap)
		return -EOPNOTSUPP;

	ret = obj->funcs->vmap(obj, map);
	if (ret)
		return ret;
	else if (dma_buf_map_is_null(map))
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(drm_gem_vmap);

void drm_gem_vunmap(struct drm_gem_object *obj, struct dma_buf_map *map)
{
	if (dma_buf_map_is_null(map))
		return;

	if (obj->funcs->vunmap)
		obj->funcs->vunmap(obj, map);

	/* Always set the mapping to NULL. Callers may rely on this. */
	dma_buf_map_clear(map);
}
EXPORT_SYMBOL(drm_gem_vunmap);
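
/*
 * Sketch of a kernel CPU-access pairing using these helpers.
 *
 *	struct dma_buf_map map;
 *	int ret;
 *
 *	ret = drm_gem_vmap(obj, &map);
 *	if (ret)
 *		return ret;
 *
 *	// ... access the buffer via map.vaddr (or map.vaddr_iomem) ...
 *
 *	drm_gem_vunmap(obj, &map);
 */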

/**
 * drm_gem_lock_reservations - Sets up the ww context and acquires
 * the lock on an array of GEM objects.
 *
 * Once you've locked your reservations, you'll want to set up space
 * for your shared fences (if applicable), submit your job, then
 * drm_gem_unlock_reservations().
 *
 * @objs: drm_gem_objects to lock
 * @count: Number of objects in @objs
 * @acquire_ctx: struct ww_acquire_ctx that will be initialized as
 * part of tracking this set of locked reservations.
 */
int
drm_gem_lock_reservations(struct drm_gem_object **objs, int count,
			  struct ww_acquire_ctx *acquire_ctx)
{
	int contended = -1;
	int i, ret;

	ww_acquire_init(acquire_ctx, &reservation_ww_class);

retry:
	if (contended != -1) {
		struct drm_gem_object *obj = objs[contended];

		ret = dma_resv_lock_slow_interruptible(obj->resv,
						       acquire_ctx);
		if (ret) {
			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	for (i = 0; i < count; i++) {
		if (i == contended)
			continue;

		ret = dma_resv_lock_interruptible(objs[i]->resv,
						  acquire_ctx);
		if (ret) {
			int j;

			for (j = 0; j < i; j++)
				dma_resv_unlock(objs[j]->resv);

			if (contended != -1 && contended >= i)
				dma_resv_unlock(objs[contended]->resv);

			if (ret == -EDEADLK) {
				contended = i;
				goto retry;
			}

			ww_acquire_done(acquire_ctx);
			return ret;
		}
	}

	ww_acquire_done(acquire_ctx);

	return 0;
}
EXPORT_SYMBOL(drm_gem_lock_reservations);

void
drm_gem_unlock_reservations(struct drm_gem_object **objs, int count,
			    struct ww_acquire_ctx *acquire_ctx)
{
	int i;

	for (i = 0; i < count; i++)
		dma_resv_unlock(objs[i]->resv);

	ww_acquire_fini(acquire_ctx);
}
EXPORT_SYMBOL(drm_gem_unlock_reservations);
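
/*
 * Sketch of the call sequence around job submission described in the
 * drm_gem_lock_reservations() comment above.
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_gem_lock_reservations(objs, count, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	// ... reserve fence slots, add dependencies, push the job ...
 *
 *	drm_gem_unlock_reservations(objs, count, &ctx);
 */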

/**
 * drm_gem_fence_array_add - Adds the fence to an array of fences to be
 * waited on, deduplicating fences from the same context.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @fence: the dma_fence to add to the list of dependencies.
 *
 * This function consumes the reference for @fence both on success and error
 * cases.
 *
 * Returns:
 * 0 on success, or an error on failing to expand the array.
 */
int drm_gem_fence_array_add(struct xarray *fence_array,
			    struct dma_fence *fence)
{
	struct dma_fence *entry;
	unsigned long index;
	u32 id = 0;
	int ret;

	if (!fence)
		return 0;

	/* Deduplicate if we already depend on a fence from the same context.
	 * This lets the size of the array of deps scale with the number of
	 * engines involved, rather than the number of BOs.
	 */
	xa_for_each(fence_array, index, entry) {
		if (entry->context != fence->context)
			continue;

		if (dma_fence_is_later(fence, entry)) {
			dma_fence_put(entry);
			xa_store(fence_array, index, fence, GFP_KERNEL);
		} else {
			dma_fence_put(fence);
		}
		return 0;
	}

	ret = xa_alloc(fence_array, &id, fence, xa_limit_32b, GFP_KERNEL);
	if (ret != 0)
		dma_fence_put(fence);

	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add);

/**
 * drm_gem_fence_array_add_implicit - Adds the implicit dependencies tracked
 * in the GEM object's reservation object to an array of dma_fences for use in
 * scheduling a rendering job.
 *
 * This should be called after drm_gem_lock_reservations() on your array of
 * GEM objects used in the job but before updating the reservations with your
 * own fences.
 *
 * @fence_array: array of dma_fence * for the job to block on.
 * @obj: the gem object to add new dependencies from.
 * @write: whether the job might write the object (so we need to depend on
 * shared fences in the reservation object).
 */
int drm_gem_fence_array_add_implicit(struct xarray *fence_array,
				     struct drm_gem_object *obj,
				     bool write)
{
	struct dma_resv_iter cursor;
	struct dma_fence *fence;
	int ret = 0;

	dma_resv_for_each_fence(&cursor, obj->resv, write, fence) {
		ret = drm_gem_fence_array_add(fence_array, fence);
		if (ret)
			break;
	}
	return ret;
}
EXPORT_SYMBOL(drm_gem_fence_array_add_implicit);
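
/*
 * Sketch of collecting implicit dependencies for a scheduler job; job->deps
 * and job->bo_write are hypothetical driver fields. The xarray must be
 * initialized with XA_FLAGS_ALLOC, since drm_gem_fence_array_add() stores
 * fences via xa_alloc().
 *
 *	xa_init_flags(&job->deps, XA_FLAGS_ALLOC);
 *
 *	for (i = 0; i < count; i++) {
 *		ret = drm_gem_fence_array_add_implicit(&job->deps, objs[i],
 *						       job->bo_write[i]);
 *		if (ret)
 *			return ret;
 *	}
 */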