// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "intel_memory_region.h"
#include "i915_drv.h"
#include "i915_ttm_buddy_manager.h"

static const struct {
	u16 class;
	u16 instance;
} intel_region_map[] = {
	[INTEL_REGION_SMEM] = {
		.class = INTEL_MEMORY_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_LMEM] = {
		.class = INTEL_MEMORY_LOCAL,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_SMEM] = {
		.class = INTEL_MEMORY_STOLEN_SYSTEM,
		.instance = 0,
	},
	[INTEL_REGION_STOLEN_LMEM] = {
		.class = INTEL_MEMORY_STOLEN_LOCAL,
		.instance = 0,
	},
};

struct intel_memory_region *
intel_memory_region_lookup(struct drm_i915_private *i915,
			   u16 class, u16 instance)
{
	struct intel_memory_region *mr;
	int id;

	/* XXX: consider maybe converting to an rb tree at some point */
	for_each_memory_region(mr, i915, id) {
		if (mr->type == class && mr->instance == instance)
			return mr;
	}

	return NULL;
}
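
/*
 * Illustrative sketch only (not part of this file): a caller holding a
 * (class, instance) pair from userspace, e.g. a gem_create_ext placement,
 * might resolve it roughly like this. The variable names are made up.
 *
 *	mr = intel_memory_region_lookup(i915, region->memory_class,
 *					region->memory_instance);
 *	if (!mr)
 *		return -EINVAL;
 */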

struct intel_memory_region *
intel_memory_region_by_type(struct drm_i915_private *i915,
			    enum intel_memory_type mem_type)
{
	struct intel_memory_region *mr;
	int id;

	for_each_memory_region(mr, i915, id)
		if (mr->type == mem_type)
			return mr;

	return NULL;
}

/**
 * intel_memory_region_reserve - Reserve a memory range
 * @mem: The region for which we want to reserve a range.
 * @offset: Start of the range to reserve.
 * @size: The size of the range to reserve.
 *
 * Return: 0 on success, negative error code on failure.
 */
int intel_memory_region_reserve(struct intel_memory_region *mem,
				resource_size_t offset,
				resource_size_t size)
{
	struct ttm_resource_manager *man = mem->region_private;

	GEM_BUG_ON(mem->is_range_manager);

	return i915_ttm_buddy_man_reserve(man, offset, size);
}
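
/*
 * Illustrative sketch only: reserving a range up front keeps later
 * allocations out of memory that something else already owns (firmware
 * carve-outs and the like). The offset and size names below are made up.
 *
 *	err = intel_memory_region_reserve(mem, reserved_base, reserved_size);
 *	if (err)
 *		drm_warn(&i915->drm, "Failed to reserve range: %d\n", err);
 *
 * Note that the range is handed to the buddy manager, so the region must
 * not be backed by a range manager (see the GEM_BUG_ON above).
 */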

void intel_memory_region_debug(struct intel_memory_region *mr,
			       struct drm_printer *printer)
{
	drm_printf(printer, "%s: ", mr->name);

	if (mr->region_private)
		ttm_resource_manager_debug(mr->region_private, printer);
	else
		drm_printf(printer, "total:%pa, available:%pa bytes\n",
			   &mr->total, &mr->avail);
}
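
/*
 * Illustrative sketch only: a debugfs-style dump of every probed region can
 * be layered on this helper, assuming a seq_file-backed printer.
 *
 *	struct drm_printer p = drm_seq_file_printer(m);
 *	struct intel_memory_region *mr;
 *	int id;
 *
 *	for_each_memory_region(mr, i915, id)
 *		intel_memory_region_debug(mr, &p);
 */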

struct intel_memory_region *
intel_memory_region_create(struct drm_i915_private *i915,
			   resource_size_t start,
			   resource_size_t size,
			   resource_size_t min_page_size,
			   resource_size_t io_start,
			   u16 type,
			   u16 instance,
			   const struct intel_memory_region_ops *ops)
{
	struct intel_memory_region *mem;
	int err;

	mem = kzalloc(sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return ERR_PTR(-ENOMEM);

	mem->i915 = i915;
	mem->region = (struct resource)DEFINE_RES_MEM(start, size);
	mem->io_start = io_start;
	mem->min_page_size = min_page_size;
	mem->ops = ops;
	mem->total = size;
	mem->avail = mem->total;
	mem->type = type;
	mem->instance = instance;

	mutex_init(&mem->objects.lock);
	INIT_LIST_HEAD(&mem->objects.list);

	if (ops->init) {
		err = ops->init(mem);
		if (err)
			goto err_free;
	}

	kref_init(&mem->kref);
	return mem;

err_free:
	kfree(mem);
	return ERR_PTR(err);
}
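
/*
 * Illustrative sketch only (names are made up; see the real backends such
 * as the stolen and shmem setup paths): a backend supplies ops and lets
 * this constructor fill in the common fields and take the initial reference.
 *
 *	static const struct intel_memory_region_ops my_region_ops = {
 *		.init = my_region_init,		// runs before the kref goes live
 *		.release = my_region_release,	// runs from the final put
 *	};
 *
 *	mem = intel_memory_region_create(i915, start, size, PAGE_SIZE,
 *					 io_start, type, instance,
 *					 &my_region_ops);
 *	if (IS_ERR(mem))
 *		return PTR_ERR(mem);
 *	intel_memory_region_set_name(mem, "my-region");
 */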

void intel_memory_region_set_name(struct intel_memory_region *mem,
				  const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(mem->name, sizeof(mem->name), fmt, ap);
	va_end(ap);
}

static void __intel_memory_region_destroy(struct kref *kref)
{
	struct intel_memory_region *mem =
		container_of(kref, typeof(*mem), kref);

	if (mem->ops->release)
		mem->ops->release(mem);

	mutex_destroy(&mem->objects.lock);
	kfree(mem);
}

struct intel_memory_region *
intel_memory_region_get(struct intel_memory_region *mem)
{
	kref_get(&mem->kref);
	return mem;
}

void intel_memory_region_put(struct intel_memory_region *mem)
{
	kref_put(&mem->kref, __intel_memory_region_destroy);
}
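
/*
 * Lifetime note (summarising the code above, nothing new):
 * intel_memory_region_create() returns with the kref already at one, so a
 * bare create/put pair is balanced. intel_memory_region_get() takes an
 * extra reference; the final intel_memory_region_put() runs
 * __intel_memory_region_destroy(), which in turn calls ops->release.
 */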

/* Global memory region registration -- only slight layer inversions! */

int intel_memory_regions_hw_probe(struct drm_i915_private *i915)
{
	int err, i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *mem = ERR_PTR(-ENODEV);
		u16 type, instance;

		if (!HAS_REGION(i915, BIT(i)))
			continue;

		type = intel_region_map[i].class;
		instance = intel_region_map[i].instance;
		switch (type) {
		case INTEL_MEMORY_SYSTEM:
			if (IS_DGFX(i915))
				mem = i915_gem_ttm_system_setup(i915, type,
								instance);
			else
				mem = i915_gem_shmem_setup(i915, type,
							   instance);
			break;
		case INTEL_MEMORY_STOLEN_LOCAL:
			mem = i915_gem_stolen_lmem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		case INTEL_MEMORY_STOLEN_SYSTEM:
			mem = i915_gem_stolen_smem_setup(i915, type, instance);
			if (!IS_ERR(mem))
				i915->mm.stolen_region = mem;
			break;
		default:
			continue;
		}

		if (IS_ERR(mem)) {
			err = PTR_ERR(mem);
			drm_err(&i915->drm,
				"Failed to setup region(%d) type=%d\n",
				err, type);
			goto out_cleanup;
		}

		mem->id = i;
		i915->mm.regions[i] = mem;
	}

	return 0;

out_cleanup:
	intel_memory_regions_driver_release(i915);
	return err;
}
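
/*
 * Illustrative pairing only (the real call sites live in the driver probe
 * and teardown paths): every successful hw_probe is expected to be matched
 * by a driver_release, which also runs above on the probe error path.
 *
 *	ret = intel_memory_regions_hw_probe(i915);
 *	if (ret)
 *		return ret;
 *	...
 *	intel_memory_regions_driver_release(i915);
 */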

void intel_memory_regions_driver_release(struct drm_i915_private *i915)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(i915->mm.regions); i++) {
		struct intel_memory_region *region =
			fetch_and_zero(&i915->mm.regions[i]);

		if (region)
			intel_memory_region_put(region);
	}
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/intel_memory_region.c"
#include "selftests/mock_region.c"
#endif