// SPDX-License-Identifier: MIT
/*
 * Copyright © 2016 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "intel_context.h"
#include "intel_engine_pm.h"

#include "mock_engine.h"
#include "selftests/mock_request.h"

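/*
 * Pin the timeline's HWSP map, taking the backing object lock directly
 * instead of going through a full ww transaction.
 */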
static int mock_timeline_pin(struct intel_timeline *tl)
{
	int err;

	if (WARN_ON(!i915_gem_object_trylock(tl->hwsp_ggtt->obj)))
		return -EBUSY;

	err = intel_timeline_pin_map(tl);
	i915_gem_object_unlock(tl->hwsp_ggtt->obj);
	if (err)
		return err;

	atomic_inc(&tl->pin_count);
	return 0;
}

static void mock_timeline_unpin(struct intel_timeline *tl)
{
	GEM_BUG_ON(!atomic_read(&tl->pin_count));
	atomic_dec(&tl->pin_count);
}

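/*
 * Fabricate a software-only ring: a plain kernel allocation dressed up
 * with a dummy GGTT vma so that the core request machinery treats it
 * as a real, already-pinned ring without ever touching hardware.
 */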
static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
{
	const unsigned long sz = PAGE_SIZE / 2;
	struct intel_ring *ring;

	ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
	if (!ring)
		return NULL;

	kref_init(&ring->ref);
	ring->size = sz;
	ring->effective_size = sz;
	ring->vaddr = (void *)(ring + 1);
	atomic_set(&ring->pin_count, 1);

	ring->vma = i915_vma_alloc();
	if (!ring->vma) {
		kfree(ring);
		return NULL;
	}
	i915_active_init(&ring->vma->active, NULL, NULL, 0);
	__set_bit(I915_VMA_GGTT_BIT, __i915_vma_flags(ring->vma));
	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &ring->vma->node.flags);
	ring->vma->node.size = sz;

	intel_ring_update_space(ring);

	return ring;
}

static void mock_ring_free(struct intel_ring *ring)
{
	i915_active_fini(&ring->vma->active);
	i915_vma_free(ring->vma);

	kfree(ring);
}

static struct i915_request *first_request(struct mock_engine *engine)
{
	return list_first_entry_or_null(&engine->hw_queue,
					struct i915_request,
					mock.link);
}

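/*
 * Complete a request on the fake hw: mark it done and kick the
 * breadcrumb signaler, much as the user-interrupt handler would
 * for real hardware.
 */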
static void advance(struct i915_request *request)
{
	list_del_init(&request->mock.link);
	i915_request_mark_complete(request);
	GEM_BUG_ON(!i915_request_completed(request));

	intel_engine_signal_breadcrumbs(request->engine);
}

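/*
 * Timer callback standing in for the hardware: the delay of the request
 * at the head of the queue has expired, so complete it and everything
 * queued behind it that asked for no delay.
 */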
static void hw_delay_complete(struct timer_list *t)
{
	struct mock_engine *engine = from_timer(engine, t, hw_delay);
	struct i915_request *request;
	unsigned long flags;

	spin_lock_irqsave(&engine->hw_lock, flags);

	/* Timer fired, first request is complete */
	request = first_request(engine);
	if (request)
		advance(request);

	/*
	 * Also immediately signal any subsequent 0-delay requests, but
	 * requeue the timer for the next delayed request.
	 */
	while ((request = first_request(engine))) {
		if (request->mock.delay) {
			mod_timer(&engine->hw_delay,
				  jiffies + request->mock.delay);
			break;
		}

		advance(request);
	}

	spin_unlock_irqrestore(&engine->hw_lock, flags);
}

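/* Mock contexts carry no backing state, so (un)pinning is a no-op. */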
static void mock_context_unpin(struct intel_context *ce)
{
}

static void mock_context_post_unpin(struct intel_context *ce)
{
}

static void mock_context_destroy(struct kref *ref)
{
	struct intel_context *ce = container_of(ref, typeof(*ce), ref);

	GEM_BUG_ON(intel_context_is_pinned(ce));

	if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		mock_ring_free(ce->ring);
		mock_timeline_unpin(ce->timeline);
	}

	intel_context_fini(ce);
	intel_context_free(ce);
}

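/* One-shot allocation of the mock ring and pinned timeline for a context. */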
static int mock_context_alloc(struct intel_context *ce)
{
	int err;

	ce->ring = mock_ring(ce->engine);
	if (!ce->ring)
		return -ENOMEM;

	ce->timeline = intel_timeline_create(ce->engine->gt);
	if (IS_ERR(ce->timeline)) {
		err = PTR_ERR(ce->timeline);
		ce->timeline = NULL;
		goto err_ring;
	}

	err = mock_timeline_pin(ce->timeline);
	if (err) {
		intel_timeline_put(ce->timeline);
		ce->timeline = NULL;
		goto err_ring;
	}

	return 0;

err_ring:
	mock_ring_free(ce->ring);
	ce->ring = NULL;
	return err;
}

static int mock_context_pre_pin(struct intel_context *ce,
				struct i915_gem_ww_ctx *ww, void **unused)
{
	return 0;
}

static int mock_context_pin(struct intel_context *ce, void *unused)
{
	return 0;
}

static void mock_context_reset(struct intel_context *ce)
{
}

static const struct intel_context_ops mock_context_ops = {
	.alloc = mock_context_alloc,

	.pre_pin = mock_context_pre_pin,
	.pin = mock_context_pin,
	.unpin = mock_context_unpin,
	.post_unpin = mock_context_post_unpin,

	.enter = intel_context_enter_engine,
	.exit = intel_context_exit_engine,

	.reset = mock_context_reset,
	.destroy = mock_context_destroy,
};

static int mock_request_alloc(struct i915_request *request)
{
	INIT_LIST_HEAD(&request->mock.link);
	request->mock.delay = 0;

	return 0;
}

static int mock_emit_flush(struct i915_request *request,
			   unsigned int flags)
{
	return 0;
}

static u32 *mock_emit_breadcrumb(struct i915_request *request, u32 *cs)
{
	return cs;
}

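/*
 * Queue the request onto the fake hw queue; the request at the head is
 * either completed immediately (zero delay) or left for the hw_delay
 * timer to complete later.
 */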
static void mock_submit_request(struct i915_request *request)
{
	struct mock_engine *engine =
		container_of(request->engine, typeof(*engine), base);
	unsigned long flags;

	i915_request_submit(request);

	spin_lock_irqsave(&engine->hw_lock, flags);
	list_add_tail(&request->mock.link, &engine->hw_queue);
	if (list_is_first(&request->mock.link, &engine->hw_queue)) {
		if (request->mock.delay)
			mod_timer(&engine->hw_delay,
				  jiffies + request->mock.delay);
		else
			advance(request);
	}
	spin_unlock_irqrestore(&engine->hw_lock, flags);
}

static void mock_add_to_engine(struct i915_request *rq)
{
	lockdep_assert_held(&rq->engine->sched_engine->lock);
	list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests);
}

static void mock_remove_from_engine(struct i915_request *rq)
{
	struct intel_engine_cs *engine, *locked;

	/*
	 * Virtual engines complicate acquiring the engine timeline lock,
	 * as their rq->engine pointer is not stable until under that
	 * engine lock. The simple ploy we use is to take the lock then
	 * check that the rq still belongs to the newly locked engine.
	 */

	locked = READ_ONCE(rq->engine);
	spin_lock_irq(&locked->sched_engine->lock);
	while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) {
		spin_unlock(&locked->sched_engine->lock);
		spin_lock(&engine->sched_engine->lock);
		locked = engine;
	}
	list_del_init(&rq->sched.link);
	spin_unlock_irq(&locked->sched_engine->lock);
}

static void mock_reset_prepare(struct intel_engine_cs *engine)
{
}

static void mock_reset_rewind(struct intel_engine_cs *engine, bool stalled)
{
	GEM_BUG_ON(stalled);
}

static void mock_reset_cancel(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);
	struct i915_request *rq;
	unsigned long flags;

	del_timer_sync(&mock->hw_delay);

	spin_lock_irqsave(&engine->sched_engine->lock, flags);

	/* Mark all submitted requests as skipped. */
	list_for_each_entry(rq, &engine->sched_engine->requests, sched.link)
		i915_request_put(i915_request_mark_eio(rq));
	intel_engine_signal_breadcrumbs(engine);

	/* Cancel and submit all pending requests. */
	list_for_each_entry(rq, &mock->hw_queue, mock.link) {
		if (i915_request_mark_eio(rq)) {
			__i915_request_submit(rq);
			i915_request_put(rq);
		}
	}
	INIT_LIST_HEAD(&mock->hw_queue);

	spin_unlock_irqrestore(&engine->sched_engine->lock, flags);
}

static void mock_reset_finish(struct intel_engine_cs *engine)
{
}

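/* Undo mock_engine_init(): drop the scheduler, breadcrumbs and kernel context. */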
static void mock_engine_release(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);

	GEM_BUG_ON(timer_pending(&mock->hw_delay));

	i915_sched_engine_put(engine->sched_engine);
	intel_breadcrumbs_put(engine->breadcrumbs);

	intel_context_unpin(engine->kernel_context);
	intel_context_put(engine->kernel_context);

	intel_engine_fini_retire(engine);
}

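/*
 * First phase of construction: create the bare minimum of
 * struct intel_engine_cs needed to build and execute requests against
 * a fake hw queue, with no hardware involved.
 */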
struct intel_engine_cs *mock_engine(struct drm_i915_private *i915,
				    const char *name,
				    int id)
{
	struct mock_engine *engine;

	GEM_BUG_ON(id >= I915_NUM_ENGINES);
	GEM_BUG_ON(!i915->gt.uncore);

	engine = kzalloc(sizeof(*engine) + PAGE_SIZE, GFP_KERNEL);
	if (!engine)
		return NULL;

	/* minimal engine setup for requests */
	engine->base.i915 = i915;
	engine->base.gt = &i915->gt;
	engine->base.uncore = i915->gt.uncore;
	snprintf(engine->base.name, sizeof(engine->base.name), "%s", name);
	engine->base.id = id;
	engine->base.mask = BIT(id);
	engine->base.legacy_idx = INVALID_ENGINE;
	engine->base.instance = id;
	engine->base.status_page.addr = (void *)(engine + 1);

	engine->base.cops = &mock_context_ops;
	engine->base.request_alloc = mock_request_alloc;
	engine->base.emit_flush = mock_emit_flush;
	engine->base.emit_fini_breadcrumb = mock_emit_breadcrumb;
	engine->base.submit_request = mock_submit_request;
	engine->base.add_active_request = mock_add_to_engine;
	engine->base.remove_active_request = mock_remove_from_engine;

	engine->base.reset.prepare = mock_reset_prepare;
	engine->base.reset.rewind = mock_reset_rewind;
	engine->base.reset.cancel = mock_reset_cancel;
	engine->base.reset.finish = mock_reset_finish;

	engine->base.release = mock_engine_release;

	i915->gt.engine[id] = &engine->base;
	i915->gt.engine_class[0][id] = &engine->base;

	/* fake hw queue */
	spin_lock_init(&engine->hw_lock);
	timer_setup(&engine->hw_delay, hw_delay_complete, 0);
	INIT_LIST_HEAD(&engine->hw_queue);

	intel_engine_add_user(&engine->base);

	return &engine->base;
}

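/*
 * Second phase of construction: set up the scheduler, breadcrumbs and
 * pinned kernel context once the rest of the mock GT is in place.
 */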
int mock_engine_init(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	INIT_LIST_HEAD(&engine->pinned_contexts_list);

	engine->sched_engine = i915_sched_engine_create(ENGINE_MOCK);
	if (!engine->sched_engine)
		return -ENOMEM;
	engine->sched_engine->private_data = engine;

	intel_engine_init_execlists(engine);
	intel_engine_init__pm(engine);
	intel_engine_init_retire(engine);

	engine->breadcrumbs = intel_breadcrumbs_create(NULL);
	if (!engine->breadcrumbs)
		goto err_schedule;

	ce = create_kernel_context(engine);
	if (IS_ERR(ce))
		goto err_breadcrumbs;

	/* We insist the kernel context is using the status_page */
	engine->status_page.vma = ce->timeline->hwsp_ggtt;

	engine->kernel_context = ce;
	return 0;

err_breadcrumbs:
	intel_breadcrumbs_put(engine->breadcrumbs);
err_schedule:
	i915_sched_engine_put(engine->sched_engine);
	return -ENOMEM;
}

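/*
 * Drain the fake hw queue: cancel any pending delay and immediately
 * complete every request still waiting on the mock hardware.
 */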
void mock_engine_flush(struct intel_engine_cs *engine)
{
	struct mock_engine *mock =
		container_of(engine, typeof(*mock), base);
	struct i915_request *request, *rn;

	del_timer_sync(&mock->hw_delay);

	spin_lock_irq(&mock->hw_lock);
	list_for_each_entry_safe(request, rn, &mock->hw_queue, mock.link)
		advance(request);
	spin_unlock_irq(&mock->hw_lock);
}

void mock_engine_reset(struct intel_engine_cs *engine)
{
}