// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "gem/i915_gem_region.h"
#include "gem/i915_gem_lmem.h"
#include "i915_drv.h"

/**
 * i915_gem_object_lmem_io_map - Map part of an lmem object into CPU iomem
 * @obj: The object to map. Must have contiguous backing storage.
 * @n: Index of the first backing page to map.
 * @size: The size of the mapping in bytes.
 *
 * Return: A write-combined __iomem pointer into the object's memory region.
 */
void __iomem *
i915_gem_object_lmem_io_map(struct drm_i915_gem_object *obj,
			    unsigned long n,
			    unsigned long size)
{
	resource_size_t offset;

	GEM_BUG_ON(!i915_gem_object_is_contiguous(obj));

	offset = i915_gem_object_get_dma_address(obj, n);
	offset -= obj->mm.region->region.start;

	return io_mapping_map_wc(&obj->mm.region->iomap, offset, size);
}
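
/*
 * Example (illustrative sketch, not part of the original file): mapping the
 * first page of a hypothetical contiguous lmem object "obj" and writing a
 * dword through the write-combined mapping.
 *
 *	void __iomem *vaddr;
 *
 *	vaddr = i915_gem_object_lmem_io_map(obj, 0, PAGE_SIZE);
 *	writel(0xc0ffee, vaddr);
 *	io_mapping_unmap(vaddr);
 */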

/**
 * i915_gem_object_is_lmem - Whether the object is resident in
 * lmem
 * @obj: The object to check.
 *
 * Even if an object is allowed to migrate and change memory region,
 * this function checks whether it will always be present in lmem when
 * valid, *or*, if that's not the case, whether it's currently resident in
 * lmem. For migratable and evictable objects, the latter only makes sense
 * when the object is locked.
 *
 * Return: Whether the object is migratable but resident in lmem, or not
 * migratable and will be present in lmem when valid.
 */
bool i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
	if (i915_gem_object_migratable(obj) &&
	    i915_gem_object_evictable(obj))
		assert_object_held(obj);
#endif
	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
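
/*
 * Example (illustrative, not part of the original file): picking a CPU
 * mapping type based on residency, with the object lock held so the answer
 * is stable for a migratable object.
 *
 *	enum i915_map_type type;
 *
 *	i915_gem_object_lock(obj, NULL);
 *	type = i915_gem_object_is_lmem(obj) ? I915_MAP_WC : I915_MAP_WB;
 *	i915_gem_object_unlock(obj);
 */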

/**
 * __i915_gem_object_is_lmem - Whether the object is resident in
 * lmem while in the fence signaling critical path.
 * @obj: The object to check.
 *
 * This function is intended to be called from within the fence signaling
 * path, where the fence, or a pin, keeps the object from being migrated,
 * for example during GPU reset.
 *
 * Return: Whether the object is resident in lmem.
 */
bool __i915_gem_object_is_lmem(struct drm_i915_gem_object *obj)
{
	struct intel_memory_region *mr = READ_ONCE(obj->mm.region);

#ifdef CONFIG_LOCKDEP
	GEM_WARN_ON(dma_resv_test_signaled(obj->base.resv, true) &&
		    i915_gem_object_evictable(obj));
#endif
	return mr && (mr->type == INTEL_MEMORY_LOCAL ||
		      mr->type == INTEL_MEMORY_STOLEN_LOCAL);
}
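
/*
 * Example (illustrative, not part of the original file): in an error-capture
 * style path where a pin keeps the object from migrating, the unlocked
 * variant can decide how to read the pages back; map_wc_page() and
 * map_wb_page() below are hypothetical helpers.
 *
 *	void *vaddr = __i915_gem_object_is_lmem(obj) ?
 *		      map_wc_page(obj, n) : map_wb_page(obj, n);
 */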

/**
 * __i915_gem_object_create_lmem_with_ps - Create lmem object and force the
 * minimum page size for the backing pages.
 * @i915: The i915 instance.
 * @size: The size in bytes for the object. Note that we need to round the size
 * up depending on the @page_size. The final object size can be fished out from
 * the drm GEM object.
 * @page_size: The requested minimum page size in bytes for this object. This is
 * useful if we need something bigger than the region's min_page_size due to
 * some hw restriction, or in some very specialised cases where it needs to be
 * smaller, where the internal fragmentation cost is too great when rounding up
 * the object size.
 * @flags: The optional BO allocation flags.
 *
 * Note that this interface assumes you know what you are doing when forcing
 * the @page_size. If this is smaller than the region's min_page_size then it
 * can never be inserted into any GTT, otherwise it might lead to undefined
 * behaviour.
 *
 * Return: The object pointer, which might be an ERR_PTR in the case of failure.
 */
struct drm_i915_gem_object *
__i915_gem_object_create_lmem_with_ps(struct drm_i915_private *i915,
				      resource_size_t size,
				      resource_size_t page_size,
				      unsigned int flags)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
					     size, page_size, flags);
}
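
/*
 * Example (illustrative, not part of the original file): allocating 2 MiB of
 * lmem with the backing pages forced to at least 64 KiB, e.g. to satisfy a
 * hardware alignment restriction.
 *
 *	obj = __i915_gem_object_create_lmem_with_ps(i915, SZ_2M, SZ_64K, 0);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */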

/**
 * i915_gem_object_create_lmem_from_data - Create a contiguous lmem object
 * and fill it with a copy of the given data.
 * @i915: The i915 instance.
 * @data: The data to copy into the object.
 * @size: The size of @data in bytes. The object size is rounded up to a
 * multiple of the page size.
 *
 * Return: The object pointer, which might be an ERR_PTR in the case of failure.
 */
struct drm_i915_gem_object *
i915_gem_object_create_lmem_from_data(struct drm_i915_private *i915,
				      const void *data, size_t size)
{
	struct drm_i915_gem_object *obj;
	void *map;

	obj = i915_gem_object_create_lmem(i915,
					  round_up(size, PAGE_SIZE),
					  I915_BO_ALLOC_CONTIGUOUS);
	if (IS_ERR(obj))
		return obj;

	map = i915_gem_object_pin_map_unlocked(obj, I915_MAP_WC);
	if (IS_ERR(map)) {
		i915_gem_object_put(obj);
		return map;
	}

	memcpy(map, data, size);

	i915_gem_object_unpin_map(obj);

	return obj;
}
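
/*
 * Example (illustrative, not part of the original file): uploading a small
 * firmware-style blob into lmem; "blob" and "blob_size" are hypothetical.
 *
 *	obj = i915_gem_object_create_lmem_from_data(i915, blob, blob_size);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */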

struct drm_i915_gem_object *
i915_gem_object_create_lmem(struct drm_i915_private *i915,
			    resource_size_t size,
			    unsigned int flags)
{
	return i915_gem_object_create_region(i915->mm.regions[INTEL_REGION_LMEM],
					     size, 0, flags);
}