// SPDX-License-Identifier: MIT
/*
 * Copyright © 2020-2021 Intel Corporation
 */

#include "gt/intel_migrate.h"

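/*
 * Live selftests for GEM object migration: moving an object's backing
 * store between system memory (SMEM) and device-local memory (LMEM).
 */

/*
 * Fill @obj with an ascending u32 pattern through a CPU map, or, with
 * @fill false, verify that a previously written pattern is intact.
 * The caller must hold the object lock.
 */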
static int igt_fill_check_buffer(struct drm_i915_gem_object *obj,
				 bool fill)
{
	struct drm_i915_private *i915 = to_i915(obj->base.dev);
	unsigned int i, count = obj->base.size / sizeof(u32);
	enum i915_map_type map_type =
		i915_coherent_map_type(i915, obj, false);
	u32 *cur;
	int err = 0;

	assert_object_held(obj);
	cur = i915_gem_object_pin_map(obj, map_type);
	if (IS_ERR(cur))
		return PTR_ERR(cur);

	if (fill)
		for (i = 0; i < count; ++i)
			*cur++ = i;
	else
		for (i = 0; i < count; ++i)
			if (*cur++ != i) {
				pr_err("Object content mismatch at location %d of %d\n", i, count);
				err = -EINVAL;
				break;
			}

	i915_gem_object_unpin_map(obj);

	return err;
}

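/*
 * Create an object in @src, fill it from the CPU, migrate it to @dst,
 * check that the object no longer claims to be migratable back to @src
 * while its pages are pinned, then verify the contents survived the move.
 *
 * The body runs as a ww transaction: for_i915_gem_ww() retries the block
 * on -EDEADLK after backing off, so error paths "continue" to let the
 * loop header decide between retrying and bailing out with the error.
 * Roughly (see i915_gem_ww.h for the real macro):
 *
 *	for (i915_gem_ww_ctx_init(&ww, intr), err = -EDEADLK;
 *	     err == -EDEADLK;
 *	     err = __i915_gem_ww_fini(&ww, err))
 *		... locked transaction body ...
 */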
static int igt_create_migrate(struct intel_gt *gt, enum intel_region_id src,
			      enum intel_region_id dst)
{
	struct drm_i915_private *i915 = gt->i915;
	struct intel_memory_region *src_mr = i915->mm.regions[src];
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	int err = 0;

	GEM_BUG_ON(!src_mr);

	/* Switch object backing-store on create */
	obj = i915_gem_object_create_region(src_mr, PAGE_SIZE, 0, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, true);
		if (err)
			continue;

		err = i915_gem_object_migrate(obj, &ww, dst);
		if (err)
			continue;

		err = i915_gem_object_pin_pages(obj);
		if (err)
			continue;

		/* The object is not expected to be migratable back to src. */
		if (i915_gem_object_can_migrate(obj, src))
			err = -EINVAL;

		i915_gem_object_unpin_pages(obj);
		if (err)
			continue;

		err = i915_gem_object_wait_migration(obj, true);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, false);
	}
	i915_gem_object_put(obj);

	return err;
}

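/*
 * The three create-migrate combinations: LMEM to SMEM, SMEM to LMEM,
 * and the degenerate LMEM to LMEM case.
 */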
static int igt_smem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_SMEM);
}

static int igt_lmem_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_SMEM, INTEL_REGION_LMEM);
}

static int igt_same_create_migrate(void *arg)
{
	return igt_create_migrate(arg, INTEL_REGION_LMEM, INTEL_REGION_LMEM);
}

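/*
 * Bounce the object to the other region: if it is currently in LMEM,
 * migrate it to SMEM and vice versa, verifying that the backing store
 * actually changed.
 */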
static int lmem_pages_migrate_one(struct i915_gem_ww_ctx *ww,
				  struct drm_i915_gem_object *obj)
{
	int err;

	err = i915_gem_object_lock(obj, ww);
	if (err)
		return err;

	if (i915_gem_object_is_lmem(obj)) {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_SMEM);
		if (err) {
			pr_err("Object failed migration to smem\n");
			return err;
		}

		if (i915_gem_object_is_lmem(obj)) {
			pr_err("object still backed by lmem\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_has_struct_page(obj)) {
			pr_err("object not backed by struct page\n");
			err = -EINVAL;
		}
	} else {
		err = i915_gem_object_migrate(obj, ww, INTEL_REGION_LMEM);
		if (err) {
			pr_err("Object failed migration to lmem\n");
			return err;
		}

		if (i915_gem_object_has_struct_page(obj)) {
			pr_err("object still backed by struct page\n");
			err = -EINVAL;
		}

		if (!i915_gem_object_is_lmem(obj)) {
			pr_err("object not backed by lmem\n");
			err = -EINVAL;
		}
	}

	return err;
}

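/*
 * Create an object in LMEM, clear it on the GPU and write a pattern from
 * the CPU, bounce it between LMEM and SMEM a number of times without
 * explicit syncs, then wait for the final migration and verify the
 * pattern from the CPU.
 */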
static int igt_lmem_pages_migrate(void *arg)
{
	struct intel_gt *gt = arg;
	struct drm_i915_private *i915 = gt->i915;
	struct drm_i915_gem_object *obj;
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;
	int i;

	/* From LMEM to shmem and back again */

	obj = i915_gem_object_create_lmem(i915, SZ_2M, 0);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* Initial GPU fill, sync, CPU initialization. */
	for_i915_gem_ww(&ww, err, true) {
		err = i915_gem_object_lock(obj, &ww);
		if (err)
			continue;

		err = ____i915_gem_object_get_pages(obj);
		if (err)
			continue;

		err = intel_migrate_clear(&gt->migrate, &ww, NULL,
					  obj->mm.pages->sgl, obj->cache_level,
					  i915_gem_object_is_lmem(obj),
					  0xdeadbeaf, &rq);
		if (rq) {
			dma_resv_add_excl_fence(obj->base.resv, &rq->fence);
			i915_request_put(rq);
		}
		if (err)
			continue;

		err = i915_gem_object_wait(obj, I915_WAIT_INTERRUPTIBLE,
					   5 * HZ);
		if (err)
			continue;

		err = igt_fill_check_buffer(obj, true);
		if (err)
			continue;
	}
	if (err)
		goto out_put;

	/*
	 * Migrate to and from smem without explicitly syncing.
	 * Finalize with data in smem for fast readout.
	 */
	for (i = 1; i <= 5; ++i) {
		for_i915_gem_ww(&ww, err, true)
			err = lmem_pages_migrate_one(&ww, obj);
		if (err)
			goto out_put;
	}

	err = i915_gem_object_lock_interruptible(obj, NULL);
	if (err)
		goto out_put;

	/* Finally sync migration and check content. */
	err = i915_gem_object_wait_migration(obj, true);
	if (err)
		goto out_unlock;

	err = igt_fill_check_buffer(obj, false);

out_unlock:
	i915_gem_object_unlock(obj);
out_put:
	i915_gem_object_put(obj);

	return err;
}

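/*
 * Live selftest entry point, wired up through the live selftest table
 * (see selftests/i915_live_selftests.h). Migration between regions only
 * makes sense on platforms with device-local memory, so the tests are
 * skipped elsewhere.
 */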
int i915_gem_migrate_live_selftests(struct drm_i915_private *i915)
{
	static const struct i915_subtest tests[] = {
		SUBTEST(igt_smem_create_migrate),
		SUBTEST(igt_lmem_create_migrate),
		SUBTEST(igt_same_create_migrate),
		SUBTEST(igt_lmem_pages_migrate),
	};

	if (!HAS_LMEM(i915))
		return 0;

	return intel_gt_live_subtests(tests, &i915->gt);
}