/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gt/intel_gpu_commands.h"
#include "gt/intel_ring.h"
#include "gvt.h"
#include "trace.h"

#define GEN9_MOCS_SIZE		64
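
/*
 * Each engine_mmio entry below names the owning engine, the register, a
 * 16-bit write mask and whether the register lives in the context state
 * image (see the struct definition in the GVT headers; the field order
 * assumed here is {id, reg, mask, in_context}).  For example, an entry
 * like:
 *
 *	{RCS0, INSTPM, 0xffff, true},
 *
 * marks INSTPM as a render-engine register that is saved/restored with
 * the context and written through the masked-bit mechanism.
 */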

/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};

static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
	{RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
	{RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
	{RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
	{RCS0, GEN9_SCRATCH1, 0, false}, /* 0xb11c */
	{RCS0, GEN9_SCRATCH_LNCF1, 0, false}, /* 0xb008 */
	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
	{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
	{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
	{RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
	{RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
	{RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
	{RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
	{RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
	{RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */
	{RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */
	{RCS0, TRVADR, 0, true}, /* 0x4df0 */
	{RCS0, TRTTE, 0, true}, /* 0x4df4 */
	{RCS0, _MMIO(0x4dfc), 0, true},

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */

	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

	{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

	{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
	{RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
	{RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */

	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
	{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};

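/*
 * Host MOCS values are cached here the first time an engine is handed
 * over while the host owns it, so they can be restored when a vGPU
 * hands the engine back (see load_render_mocs() and switch_mocs()
 * below).
 */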
static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;

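/*
 * Per-engine bases of the graphics MOCS control registers; 0xc800, for
 * example, should be GEN9_GFX_MOCS(0) for the render engine.  Entries
 * are read back 64 at a time, 4 bytes apart, in load_render_mocs().
 */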
static u32 gen9_mocs_mmio_offset_list[] = {
	[RCS0]  = 0xc800,
	[VCS0]  = 0xc900,
	[VCS1]  = 0xca00,
	[BCS0]  = 0xcc00,
	[VECS0] = 0xcb00,
};

static void load_render_mocs(const struct intel_engine_cs *engine)
{
	struct intel_gvt *gvt = engine->i915->gvt;
	struct intel_uncore *uncore = engine->uncore;
	u32 cnt = gvt->engine_mmio_list.mocs_mmio_offset_list_cnt;
	u32 *regs = gvt->engine_mmio_list.mocs_mmio_offset_list;
	i915_reg_t offset;
	int ring_id, i;

	/* Platform doesn't have mocs mmios. */
	if (!regs)
		return;

	for (ring_id = 0; ring_id < cnt; ring_id++) {
		if (!HAS_ENGINE(engine->gt, ring_id))
			continue;

		offset.reg = regs[ring_id];
		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				intel_uncore_read_fw(uncore, offset);
			offset.reg += 4;
		}
	}

	/*
	 * The L3 cacheability control (LNCFCMOCS) registers start at
	 * 0xb020; each 32-bit register packs two MOCS entries, hence
	 * GEN9_MOCS_SIZE / 2 reads.
	 */
	offset.reg = 0xb020;
	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			intel_uncore_read_fw(uncore, offset);
		offset.reg += 4;
	}
	gen9_render_mocs.initialized = true;
}

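/*
 * A sketch of the dword layout emitted by the *_for_inhibit() helpers
 * below (one MI_LOAD_REGISTER_IMM batch per table):
 *
 *	MI_LOAD_REGISTER_IMM(count)
 *	reg0 offset, reg0 value
 *	...
 *	regN offset, regN value
 *	MI_NOOP
 *
 * which is why each helper asks intel_ring_begin() for
 * "count * 2 + 2" dwords.
 */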
static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
				 struct i915_request *req)
{
	u32 *cs;
	int ret;
	struct engine_mmio *mmio;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = req->engine->id;
	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];

	if (count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->id != ring_id || !mmio->in_context)
			continue;

		*cs++ = i915_mmio_reg_offset(mmio->reg);
		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) | (mmio->mask << 16);
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, ring_id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}

static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
					struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);

	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}

static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
				     struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);

	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
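
/*
 * A sketch of the full sequence emitted for an inhibit context by
 * intel_vgpu_restore_inhibit_context() below, assuming all helpers
 * succeed:
 *
 *	MI_ARB_ON_OFF | MI_ARB_DISABLE
 *	LRI <tracked in-context engine mmio>
 *	LRI <render MOCS control table>		(render engine only)
 *	LRI <render MOCS l3cc table>		(render engine only)
 *	MI_ARB_ON_OFF | MI_ARB_ENABLE
 *
 * Arbitration is switched off first, presumably so the restore cannot
 * be preempted partway through.
 */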

/*
 * Use the LRI command to initialize the MMIO that lives in the context
 * state image of an inhibit context: the tracked engine MMIO, the
 * render MOCS control table and the render MOCS l3cc table.
 */
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req)
{
	int ret;
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = restore_context_mmio_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	/* No MOCS registers in the context image except on the render engine. */
	if (req->engine->id != RCS0)
		goto out;

	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
	if (ret)
		goto out;

out:
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return ret;
}

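/*
 * Per-engine TLB invalidate registers.  Writing 1 requests an
 * invalidation; the hardware clears the register back to 0 when it is
 * done, which handle_tlb_pending_event() below waits for.
 */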
static u32 gen8_tlb_mmio_offset_list[] = {
	[RCS0]  = 0x4260,
	[VCS0]  = 0x4264,
	[VCS1]  = 0x4268,
	[BCS0]  = 0x426c,
	[VECS0] = 0x4270,
};

static void handle_tlb_pending_event(struct intel_vgpu *vgpu,
				     const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	struct intel_vgpu_submission *s = &vgpu->submission;
	u32 *regs = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list;
	u32 cnt = vgpu->gvt->engine_mmio_list.tlb_mmio_offset_list_cnt;
	enum forcewake_domains fw;
	i915_reg_t reg;

	if (!regs)
		return;

	if (drm_WARN_ON(&engine->i915->drm, engine->id >= cnt))
		return;

	if (!test_and_clear_bit(engine->id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[engine->id]);

	/*
	 * WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold a render forcewake while invalidating RCS TLBs,
	 * otherwise the device can drop into RC6 and interrupt the
	 * invalidation process.
	 */
	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) >= 9)
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(uncore, fw);

	intel_uncore_write_fw(uncore, reg, 0x1);

	if (wait_for_atomic(intel_uncore_read_fw(uncore, reg) == 0, 50))
		gvt_vgpu_err("timeout invalidating ring %s TLB\n",
			     engine->name);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(uncore, fw);

	gvt_dbg_core("invalidate TLB for ring %s\n", engine->name);
}

static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			const struct intel_engine_cs *engine)
{
	u32 regs[] = {
		[RCS0]  = 0xc800,
		[VCS0]  = 0xc900,
		[VCS1]  = 0xca00,
		[BCS0]  = 0xcc00,
		[VECS0] = 0xcb00,
	};
	struct intel_uncore *uncore = engine->uncore;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;
	int i;

	if (drm_WARN_ON(&engine->i915->drm, engine->id >= ARRAY_SIZE(regs)))
		return;

	/*
	 * On gen9 the render engine MOCS registers live in the context
	 * image and are switched with it (see switch_mmio()), so skip
	 * the manual switch here.
	 */
	if (engine->id == RCS0 && GRAPHICS_VER(engine->i915) == 9)
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(engine);

	offset.reg = regs[engine->id];
	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[engine->id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[engine->id][i];

		if (old_v != new_v)
			intel_uncore_write_fw(uncore, offset, new_v);

		offset.reg += 4;
	}

	if (engine->id == RCS0) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				intel_uncore_write_fw(uncore, l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}

#define CTX_CONTEXT_CONTROL_VAL	0x03
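/*
 * 0x03 here is, presumably, the dword index of the CTX_CONTEXT_CONTROL
 * register's value within the logical ring context image, which is how
 * it is used against ce->lrc_reg_state below.
 */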

bool is_inhibit_context(struct intel_context *ce)
{
	const u32 *reg_state = ce->lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	return inhibit_mask ==
		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}

/* Switch ring mmio values (context). */
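/*
 * A note on mmio->mask: these look like "masked" registers, where the
 * upper 16 bits of a write select which of the lower 16 bits take
 * effect.  That is why restored values are OR-ed with (mmio->mask << 16)
 * and saved values have (mmio->mask << 16) cleared below.
 */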
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			const struct intel_engine_cs *engine)
{
	struct intel_uncore *uncore = engine->uncore;
	struct intel_vgpu_submission *s;
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	if (GRAPHICS_VER(engine->i915) >= 9)
		switch_mocs(pre, next, engine);

	for (mmio = engine->i915->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->id != engine->id)
			continue;
		/*
		 * No need to save or restore an mmio that lives in the
		 * context state image on gen9: it is initialized by the
		 * LRI command and saved/restored together with the
		 * context.
		 */
		if (GRAPHICS_VER(engine->i915) == 9 && mmio->in_context)
			continue;

		/* save */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) =
				intel_uncore_read_fw(uncore, mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
					~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else {
			old_v = mmio->value =
				intel_uncore_read_fw(uncore, mmio->reg);
		}

		/* restore */
		if (next) {
			s = &next->submission;
			/*
			 * No need to restore an in-context mmio if the
			 * next context is not an inhibit context: the
			 * context image will restore it by itself.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(s->shadow[engine->id]))
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
					(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		intel_uncore_write_fw(uncore, mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

	if (next)
		handle_tlb_pending_event(next, engine);
}

/**
 * intel_gvt_switch_mmio - switch the mmio context of a specific engine
 * @pre: the last vGPU that owned the engine
 * @next: the vGPU to switch to
 * @engine: the engine
 *
 * A NULL @pre indicates that the host owns the engine; a NULL @next
 * indicates that we are switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next,
			   const struct intel_engine_cs *engine)
{
	if (WARN(!pre && !next, "switch ring %s from host to host\n",
		 engine->name))
		return;

	gvt_dbg_render("switch ring %s from %s to %s\n", engine->name,
		       pre ? "vGPU" : "host", next ? "vGPU" : "host");

	/*
	 * We are using the raw mmio access wrappers to improve
	 * performance for batched mmio read/write, so we need to handle
	 * forcewake manually.
	 */
	intel_uncore_forcewake_get(engine->uncore, FORCEWAKE_ALL);
	switch_mmio(pre, next, engine);
	intel_uncore_forcewake_put(engine->uncore, FORCEWAKE_ALL);
}

/**
 * intel_gvt_init_engine_mmio_context - initialize the engine mmio list
 * @gvt: GVT device
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
	struct engine_mmio *mmio;

	if (GRAPHICS_VER(gvt->gt->i915) >= 9) {
		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
		gvt->engine_mmio_list.mocs_mmio_offset_list = gen9_mocs_mmio_offset_list;
		gvt->engine_mmio_list.mocs_mmio_offset_list_cnt = ARRAY_SIZE(gen9_mocs_mmio_offset_list);
	} else {
		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list = gen8_tlb_mmio_offset_list;
		gvt->engine_mmio_list.tlb_mmio_offset_list_cnt = ARRAY_SIZE(gen8_tlb_mmio_offset_list);
	}

	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->in_context) {
			gvt->engine_mmio_list.ctx_mmio_count[mmio->id]++;
			intel_gvt_mmio_set_sr_in_ctx(gvt, mmio->reg.reg);
		}
	}
}