/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2014-2019 Intel Corporation
 */

#ifndef _INTEL_GUC_H_
#define _INTEL_GUC_H_

#include <linux/xarray.h>
#include <linux/delay.h>

#include "intel_uncore.h"
#include "intel_guc_fw.h"
#include "intel_guc_fwif.h"
#include "intel_guc_ct.h"
#include "intel_guc_log.h"
#include "intel_guc_reg.h"
#include "intel_guc_slpc_types.h"
#include "intel_uc_fw.h"
#include "i915_utils.h"
#include "i915_vma.h"

struct __guc_ads_blob;

/**
 * struct intel_guc - Top level structure of GuC.
 *
 * It handles firmware loading and manages the client pool. intel_guc owns an
 * i915_sched_engine for submission.
 */
struct intel_guc {
	/** @fw: the GuC firmware */
	struct intel_uc_fw fw;
	/** @log: sub-structure containing GuC log related data and objects */
	struct intel_guc_log log;
	/** @ct: the command transport communication channel */
	struct intel_guc_ct ct;
	/** @slpc: sub-structure containing SLPC related data and objects */
	struct intel_guc_slpc slpc;

	/** @sched_engine: Global engine used to submit requests to GuC */
	struct i915_sched_engine *sched_engine;
	/**
	 * @stalled_request: if GuC can't process a request for any reason, we
	 * save it until GuC restarts processing. No other request can be
	 * submitted until the stalled request is processed.
	 */
	struct i915_request *stalled_request;
	/**
	 * @submission_stall_reason: reason why submission is stalled
	 */
	enum {
		STALL_NONE,
		STALL_REGISTER_CONTEXT,
		STALL_MOVE_LRC_TAIL,
		STALL_ADD_REQUEST,
	} submission_stall_reason;

	/* intel_guc_recv interrupt related state */
	/** @irq_lock: protects GuC irq state */
	spinlock_t irq_lock;
	/**
	 * @msg_enabled_mask: mask of events that are processed when receiving
	 * an INTEL_GUC_ACTION_DEFAULT G2H message.
	 */
	unsigned int msg_enabled_mask;

	/**
	 * @outstanding_submission_g2h: number of outstanding GuC to Host
	 * responses related to GuC submission, used to determine if the GT is
	 * idle
	 */
	atomic_t outstanding_submission_g2h;

	/** @interrupts: pointers to GuC interrupt-managing functions. */
	struct {
		void (*reset)(struct intel_guc *guc);
		void (*enable)(struct intel_guc *guc);
		void (*disable)(struct intel_guc *guc);
	} interrupts;

	/**
	 * @submission_state: sub-structure for submission state, protected by
	 * a single lock
	 */
	struct {
		/**
		 * @lock: protects everything in submission_state,
		 * ce->guc_id.id, and ce->guc_id.ref when transitioning in and
		 * out of zero
		 */
		spinlock_t lock;
		/**
		 * @guc_ids: used to allocate new guc_ids, single-lrc
		 */
		struct ida guc_ids;
		/**
		 * @guc_ids_bitmap: used to allocate new guc_ids, multi-lrc
		 */
		unsigned long *guc_ids_bitmap;
		/**
		 * @guc_id_list: list of intel_context with valid guc_ids but no
		 * refs
		 */
		struct list_head guc_id_list;
		/**
		 * @destroyed_contexts: list of contexts waiting to be destroyed
		 * (deregistered with the GuC)
		 */
		struct list_head destroyed_contexts;
		/**
		 * @destroyed_worker: worker to deregister contexts; needed
		 * because we must take a GT PM reference, which can't be done
		 * from the destroy function as it might run in an atomic
		 * context (no sleeping)
		 */
		struct work_struct destroyed_worker;
	} submission_state;

	/**
	 * @submission_supported: tracks whether we support GuC submission on
	 * the current platform
	 */
	bool submission_supported;
	/** @submission_selected: tracks whether the user enabled GuC submission */
	bool submission_selected;
	/**
	 * @rc_supported: tracks whether we support GuC rc on the current platform
	 */
	bool rc_supported;
	/** @rc_selected: tracks whether the user enabled GuC rc */
	bool rc_selected;

	/** @ads_vma: object allocated to hold the GuC ADS */
	struct i915_vma *ads_vma;
	/** @ads_blob: contents of the GuC ADS */
	struct __guc_ads_blob *ads_blob;
	/** @ads_regset_size: size of the save/restore regsets in the ADS */
	u32 ads_regset_size;
	/** @ads_golden_ctxt_size: size of the golden contexts in the ADS */
	u32 ads_golden_ctxt_size;

	/** @lrc_desc_pool: object allocated to hold the GuC LRC descriptor pool */
	struct i915_vma *lrc_desc_pool;
	/** @lrc_desc_pool_vaddr: contents of the GuC LRC descriptor pool */
	void *lrc_desc_pool_vaddr;

	/**
	 * @context_lookup: used to resolve intel_context from guc_id, if a
	 * context is present in this structure it is registered with the GuC
	 */
	struct xarray context_lookup;

	/** @params: Control params for fw initialization */
	u32 params[GUC_CTL_MAX_DWORDS];

	/** @send_regs: GuC's FW specific registers used for sending MMIO H2G */
	struct {
		u32 base;
		unsigned int count;
		enum forcewake_domains fw_domains;
	} send_regs;

	/** @notify_reg: register used to send interrupts to the GuC FW */
	i915_reg_t notify_reg;

	/**
	 * @mmio_msg: notification bitmask that the GuC writes in one of its
	 * registers when the CT channel is disabled, to be processed when the
	 * channel is back up.
	 */
	u32 mmio_msg;

	/** @send_mutex: used to serialize the intel_guc_send actions */
	struct mutex send_mutex;
};

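/**
 * log_to_guc - recover the GuC that owns a log
 * @log: the GuC log embedded in a &struct intel_guc
 *
 * The log is a member of struct intel_guc, so the owning GuC can be
 * recovered with container_of().
 */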
static inline struct intel_guc *log_to_guc(struct intel_guc_log *log)
{
	return container_of(log, struct intel_guc, log);
}

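/**
 * intel_guc_send - send an action to the GuC over the CT channel
 * @guc: the intel_guc structure
 * @action: array of dwords, action code followed by its parameters
 * @len: number of dwords in @action
 *
 * Blocking send with no response buffer and no extra G2H space reserved.
 *
 * A minimal sketch of a caller, modeled on intel_guc_auth_huc() and
 * assuming the INTEL_GUC_ACTION_AUTHENTICATE_HUC code from
 * intel_guc_fwif.h:
 *
 *	u32 action[] = {
 *		INTEL_GUC_ACTION_AUTHENTICATE_HUC,
 *		rsa_offset,
 *	};
 *
 *	return intel_guc_send(guc, action, ARRAY_SIZE(action));
 */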
static inline int intel_guc_send(struct intel_guc *guc, const u32 *action, u32 len)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0, 0);
}

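/**
 * intel_guc_send_nb - non-blocking send of an action to the GuC
 * @guc: the intel_guc structure
 * @action: array of dwords, action code followed by its parameters
 * @len: number of dwords in @action
 * @g2h_len_dw: space, in dwords, to reserve in the G2H buffer for the
 *	response the action will eventually trigger
 *
 * Does not wait for the GuC to process the action; reserving G2H space up
 * front guarantees the eventual response can be received.
 */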
static inline int intel_guc_send_nb(struct intel_guc *guc, const u32 *action,
				    u32 len, u32 g2h_len_dw)
{
	return intel_guc_ct_send(&guc->ct, action, len, NULL, 0,
				 MAKE_SEND_FLAGS(g2h_len_dw));
}

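/**
 * intel_guc_send_and_receive - send an action and wait for the response
 * @guc: the intel_guc structure
 * @action: array of dwords, action code followed by its parameters
 * @len: number of dwords in @action
 * @response_buf: buffer that receives the response payload
 * @response_buf_size: size of @response_buf in dwords
 *
 * Blocking send that copies the GuC's response payload into @response_buf.
 */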
static inline int
intel_guc_send_and_receive(struct intel_guc *guc, const u32 *action, u32 len,
			   u32 *response_buf, u32 response_buf_size)
{
	return intel_guc_ct_send(&guc->ct, action, len,
				 response_buf, response_buf_size, 0);
}

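/**
 * intel_guc_send_busy_loop - send an action, retrying while the channel is busy
 * @guc: the intel_guc structure
 * @action: array of dwords, action code followed by its parameters
 * @len: number of dwords in @action
 * @g2h_len_dw: space, in dwords, to reserve in the G2H buffer
 * @loop: whether to retry on -EBUSY
 *
 * Wraps intel_guc_send_nb(). When @loop is set and the send returns -EBUSY,
 * retries with an exponentially growing sleep period where sleeping is
 * allowed, or with a cpu_relax() busy loop when called from atomic context.
 */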
static inline int intel_guc_send_busy_loop(struct intel_guc *guc,
					   const u32 *action,
					   u32 len,
					   u32 g2h_len_dw,
					   bool loop)
{
	int err;
	unsigned int sleep_period_ms = 1;
	bool not_atomic = !in_atomic() && !irqs_disabled();

	/*
	 * FIXME: Have the caller pass in whether we are in an atomic context,
	 * to avoid using in_atomic(). It is likely safe here as we also check
	 * for disabled irqs, which basically all the spin locks in i915
	 * disable, but regardless this should be cleaned up.
	 */

	/* No sleeping with spin locks, just busy loop */
	might_sleep_if(loop && not_atomic);

retry:
	err = intel_guc_send_nb(guc, action, len, g2h_len_dw);
	if (unlikely(err == -EBUSY && loop)) {
		if (likely(not_atomic)) {
			if (msleep_interruptible(sleep_period_ms))
				return -EINTR;
			sleep_period_ms = sleep_period_ms << 1;
		} else {
			cpu_relax();
		}
		goto retry;
	}

	return err;
}

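/**
 * intel_guc_to_host_event_handler - process pending GuC-to-Host messages
 * @guc: the intel_guc structure
 *
 * Delegates to the CT layer to drain and handle queued G2H messages.
 */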
static inline void intel_guc_to_host_event_handler(struct intel_guc *guc)
{
	intel_guc_ct_event_handler(&guc->ct);
}

/* GuC addresses above GUC_GGTT_TOP also don't map through the GTT */
#define GUC_GGTT_TOP	0xFEE00000

/**
 * intel_guc_ggtt_offset() - Get and validate the GGTT offset of @vma
 * @guc: intel_guc structure.
 * @vma: i915 graphics virtual memory area.
 *
 * GuC does not allow any gfx GGTT address that falls into range
 * [0, ggtt.pin_bias), which is reserved for Boot ROM, SRAM and WOPCM.
 * Currently, in order to exclude [0, ggtt.pin_bias) address space from
 * GGTT, all gfx objects used by GuC are allocated with intel_guc_allocate_vma()
 * and pinned with PIN_OFFSET_BIAS along with the value of ggtt.pin_bias.
 *
 * Return: GGTT offset of the @vma.
 */
static inline u32 intel_guc_ggtt_offset(struct intel_guc *guc,
					struct i915_vma *vma)
{
	u32 offset = i915_ggtt_offset(vma);

	GEM_BUG_ON(offset < i915_ggtt_pin_bias(vma));
	GEM_BUG_ON(range_overflows_t(u64, offset, vma->size, GUC_GGTT_TOP));

	return offset;
}

void intel_guc_init_early(struct intel_guc *guc);
void intel_guc_init_late(struct intel_guc *guc);
void intel_guc_init_send_regs(struct intel_guc *guc);
void intel_guc_write_params(struct intel_guc *guc);
int intel_guc_init(struct intel_guc *guc);
void intel_guc_fini(struct intel_guc *guc);
void intel_guc_notify(struct intel_guc *guc);
int intel_guc_send_mmio(struct intel_guc *guc, const u32 *action, u32 len,
			u32 *response_buf, u32 response_buf_size);
int intel_guc_to_host_process_recv_msg(struct intel_guc *guc,
				       const u32 *payload, u32 len);
int intel_guc_auth_huc(struct intel_guc *guc, u32 rsa_offset);
int intel_guc_suspend(struct intel_guc *guc);
int intel_guc_resume(struct intel_guc *guc);
struct i915_vma *intel_guc_allocate_vma(struct intel_guc *guc, u32 size);
int intel_guc_allocate_and_map_vma(struct intel_guc *guc, u32 size,
				   struct i915_vma **out_vma, void **out_vaddr);

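/*
 * GuC firmware state queries: "supported" means the platform has a GuC,
 * "wanted" means the user enabled it, "used" means the firmware is available
 * for loading, and "running"/"ready" reflect the live state of the firmware
 * and the CT channel.
 */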
static inline bool intel_guc_is_supported(struct intel_guc *guc)
{
	return intel_uc_fw_is_supported(&guc->fw);
}

static inline bool intel_guc_is_wanted(struct intel_guc *guc)
{
	return intel_uc_fw_is_enabled(&guc->fw);
}

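/*
 * The GEM_BUG_ON below catches callers querying "used" before the firmware
 * fetch has resolved the SELECTED state into a final one.
 */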
static inline bool intel_guc_is_used(struct intel_guc *guc)
{
	GEM_BUG_ON(__intel_uc_fw_status(&guc->fw) == INTEL_UC_FIRMWARE_SELECTED);
	return intel_uc_fw_is_available(&guc->fw);
}

static inline bool intel_guc_is_fw_running(struct intel_guc *guc)
{
	return intel_uc_fw_is_running(&guc->fw);
}

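/* "Ready" requires both a running firmware and an enabled CT channel. */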
static inline bool intel_guc_is_ready(struct intel_guc *guc)
{
	return intel_guc_is_fw_running(guc) && intel_guc_ct_enabled(&guc->ct);
}

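/*
 * Thin wrappers around the platform-specific @interrupts callbacks assigned
 * at init time.
 */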
static inline void intel_guc_reset_interrupts(struct intel_guc *guc)
{
	guc->interrupts.reset(guc);
}

static inline void intel_guc_enable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.enable(guc);
}

static inline void intel_guc_disable_interrupts(struct intel_guc *guc)
{
	guc->interrupts.disable(guc);
}

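/**
 * intel_guc_sanitize - reset software state tracked for the GuC
 * @guc: the intel_guc structure
 *
 * Marks the firmware as needing a fresh load, quiesces interrupts and the
 * CT channel, and clears any stashed MMIO notification.
 */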
static inline int intel_guc_sanitize(struct intel_guc *guc)
{
	intel_uc_fw_sanitize(&guc->fw);
	intel_guc_disable_interrupts(guc);
	intel_guc_ct_sanitize(&guc->ct);
	guc->mmio_msg = 0;

	return 0;
}

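/*
 * Update @msg_enabled_mask under @irq_lock so changes don't race with the
 * interrupt handler reading the mask.
 */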
static inline void intel_guc_enable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask |= mask;
	spin_unlock_irq(&guc->irq_lock);
}

static inline void intel_guc_disable_msg(struct intel_guc *guc, u32 mask)
{
	spin_lock_irq(&guc->irq_lock);
	guc->msg_enabled_mask &= ~mask;
	spin_unlock_irq(&guc->irq_lock);
}

int intel_guc_wait_for_idle(struct intel_guc *guc, long timeout);

int intel_guc_deregister_done_process_msg(struct intel_guc *guc,
					  const u32 *msg, u32 len);
int intel_guc_sched_done_process_msg(struct intel_guc *guc,
				     const u32 *msg, u32 len);
int intel_guc_context_reset_process_msg(struct intel_guc *guc,
					const u32 *msg, u32 len);
int intel_guc_engine_failure_process_msg(struct intel_guc *guc,
					 const u32 *msg, u32 len);

void intel_guc_find_hung_context(struct intel_engine_cs *engine);

int intel_guc_global_policies_update(struct intel_guc *guc);

void intel_guc_context_ban(struct intel_context *ce, struct i915_request *rq);

void intel_guc_submission_reset_prepare(struct intel_guc *guc);
void intel_guc_submission_reset(struct intel_guc *guc, bool stalled);
void intel_guc_submission_reset_finish(struct intel_guc *guc);
void intel_guc_submission_cancel_requests(struct intel_guc *guc);

void intel_guc_load_status(struct intel_guc *guc, struct drm_printer *p);

void intel_guc_write_barrier(struct intel_guc *guc);

#endif