/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_CONTEXT_TYPES__
#define __INTEL_CONTEXT_TYPES__

#include <linux/average.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/types.h>

#include "i915_active_types.h"
#include "i915_sw_fence.h"
#include "i915_utils.h"
#include "intel_engine_types.h"
#include "intel_sseu.h"

#include "uc/intel_guc_fwif.h"

#define CONTEXT_REDZONE POISON_INUSE
DECLARE_EWMA(runtime, 3, 8);

struct i915_gem_context;
struct i915_gem_ww_ctx;
struct i915_vma;
struct intel_breadcrumbs;
struct intel_context;
struct intel_ring;

struct intel_context_ops {
	unsigned long flags;
#define COPS_HAS_INFLIGHT_BIT 0
#define COPS_HAS_INFLIGHT BIT(COPS_HAS_INFLIGHT_BIT)

	int (*alloc)(struct intel_context *ce);

	void (*ban)(struct intel_context *ce, struct i915_request *rq);

	int (*pre_pin)(struct intel_context *ce, struct i915_gem_ww_ctx *ww, void **vaddr);
	int (*pin)(struct intel_context *ce, void *vaddr);
	void (*unpin)(struct intel_context *ce);
	void (*post_unpin)(struct intel_context *ce);

	void (*cancel_request)(struct intel_context *ce,
			       struct i915_request *rq);

	void (*enter)(struct intel_context *ce);
	void (*exit)(struct intel_context *ce);

	void (*sched_disable)(struct intel_context *ce);

	void (*reset)(struct intel_context *ce);
	void (*destroy)(struct kref *kref);

	/* virtual/parallel engine/context interface */
	struct intel_context *(*create_virtual)(struct intel_engine_cs **engine,
						unsigned int count,
						unsigned long flags);
	struct intel_context *(*create_parallel)(struct intel_engine_cs **engines,
						 unsigned int num_siblings,
						 unsigned int width);
	struct intel_engine_cs *(*get_sibling)(struct intel_engine_cs *engine,
					       unsigned int sibling);
};

struct intel_context {
	/*
	 * Note: Some fields may be accessed under RCU.
	 *
	 * Unless otherwise noted a field can safely be assumed to be protected
	 * by strong reference counting.
	 */
	union {
		struct kref ref; /* no kref_get_unless_zero()! */
		struct rcu_head rcu;
	};

	struct intel_engine_cs *engine;
	struct intel_engine_cs *inflight;
#define __intel_context_inflight(engine) ptr_mask_bits(engine, 3)
#define __intel_context_inflight_count(engine) ptr_unmask_bits(engine, 3)
#define intel_context_inflight(ce) \
	__intel_context_inflight(READ_ONCE((ce)->inflight))
#define intel_context_inflight_count(ce) \
	__intel_context_inflight_count(READ_ONCE((ce)->inflight))
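
	/*
	 * Illustrative sketch, not part of the driver: @inflight is a
	 * tagged pointer whose low three bits carry a submission count
	 * alongside the engine pointer, so both can be published with a
	 * single atomic store. Assuming the ptr_mask_bits()/
	 * ptr_unmask_bits() semantics from i915_utils.h, the helpers
	 * above decompose it as:
	 *
	 *	struct intel_engine_cs *engine = intel_context_inflight(ce);
	 *	unsigned long count = intel_context_inflight_count(ce);
	 *
	 *	if (engine)
	 *		pr_debug("%s: %lu inflight\n", engine->name, count);
	 */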

	struct i915_address_space *vm;
	struct i915_gem_context __rcu *gem_context;

	/*
	 * @signal_lock protects the list of requests that need signaling,
	 * @signals. While there are any requests that need signaling,
	 * we add the context to the breadcrumbs worker, and remove it
	 * upon completion/cancellation of the last request.
	 */
	struct list_head signal_link; /* Accessed under RCU */
	struct list_head signals; /* Guarded by signal_lock */
	spinlock_t signal_lock; /* protects @signals, the list of requests that need signaling */

	struct i915_vma *state;
	u32 ring_size;
	struct intel_ring *ring;
	struct intel_timeline *timeline;

	unsigned long flags;
#define CONTEXT_BARRIER_BIT 0
#define CONTEXT_ALLOC_BIT 1
#define CONTEXT_INIT_BIT 2
#define CONTEXT_VALID_BIT 3
#define CONTEXT_CLOSED_BIT 4
#define CONTEXT_USE_SEMAPHORES 5
#define CONTEXT_BANNED 6
#define CONTEXT_FORCE_SINGLE_SUBMISSION 7
#define CONTEXT_NOPREEMPT 8
#define CONTEXT_LRCA_DIRTY 9
#define CONTEXT_GUC_INIT 10
#define CONTEXT_PERMA_PIN 11

	struct {
		u64 timeout_us;
	} watchdog;

	u32 *lrc_reg_state;
	union {
		struct {
			u32 lrca;
			u32 ccid;
		};
		u64 desc;
	} lrc;
	u32 tag; /* cookie passed to HW to track this context on submission */

	/* Time on GPU as tracked by the hw. */
	struct {
		struct ewma_runtime avg;
		u64 total;
		u32 last;
		I915_SELFTEST_DECLARE(u32 num_underflow);
		I915_SELFTEST_DECLARE(u32 max_underflow);
	} runtime;

	unsigned int active_count; /* protected by timeline->mutex */

	atomic_t pin_count;
	struct mutex pin_mutex; /* guards pinning and the transition to being active on the GPU */
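
	/*
	 * Illustrative sketch, not part of the driver: the usual lifecycle
	 * guarded by @pin_count/@pin_mutex. Assuming the intel_context_pin()/
	 * intel_context_unpin() and i915_request_create()/i915_request_add()
	 * helpers declared elsewhere in the driver, the first pin maps the
	 * context state and the last unpin allows it to be evicted again:
	 *
	 *	err = intel_context_pin(ce);
	 *	if (err)
	 *		return err;
	 *
	 *	rq = i915_request_create(ce);
	 *	if (IS_ERR(rq)) {
	 *		intel_context_unpin(ce);
	 *		return PTR_ERR(rq);
	 *	}
	 *
	 *	i915_request_add(rq);
	 *	intel_context_unpin(ce);
	 */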

	/**
	 * active: Active tracker for the rq activity (inc. external) on this
	 * intel_context object.
	 */
	struct i915_active active;

	const struct intel_context_ops *ops;

	/** sseu: Control eu/slice partitioning */
	struct intel_sseu sseu;

	/**
	 * pinned_contexts_link: List link for the engine's pinned contexts.
	 * This is only used if this is a perma-pinned kernel context; the
	 * list is assumed to be manipulated only during driver load or
	 * unload, so it currently needs no mutex protection.
	 */
	struct list_head pinned_contexts_link;

	u8 wa_bb_page; /* if set, page num reserved for context workarounds */

	struct {
		/** @lock: protects everything in guc_state */
		spinlock_t lock;
		/**
		 * @sched_state: scheduling state of this context using GuC
		 * submission
		 */
		u32 sched_state;
		/**
		 * @fences: maintains a list of requests that are currently
		 * being fenced until a GuC operation completes
		 */
		struct list_head fences;
		/**
		 * @blocked: fence used to signal when the blocking of a
		 * context's submissions is complete.
		 */
		struct i915_sw_fence blocked;
		/** @number_committed_requests: number of committed requests */
		int number_committed_requests;
		/** @requests: list of active requests on this context */
		struct list_head requests;
		/** @prio: the context's current GuC priority */
		u8 prio;
		/**
		 * @prio_count: a counter of the number of requests in flight
		 * in each priority bucket
		 */
		u32 prio_count[GUC_CLIENT_PRIORITY_NUM];
	} guc_state;

	struct {
		/**
		 * @id: handle which is used to uniquely identify this context
		 * with the GuC, protected by guc->submission_state.lock
		 */
		u16 id;
		/**
		 * @ref: the number of references to the guc_id; transitions
		 * in and out of zero are protected by
		 * guc->submission_state.lock
		 */
		atomic_t ref;
		/**
		 * @link: in guc->guc_id_list when the guc_id has no refs but
		 * is still valid, protected by guc->submission_state.lock
		 */
		struct list_head link;
	} guc_id;

	/**
	 * @destroyed_link: link in guc->submission_state.destroyed_contexts, in
	 * list when context is pending to be destroyed (deregistered with the
	 * GuC), protected by guc->submission_state.lock
	 */
	struct list_head destroyed_link;

	/** @parallel: sub-structure for parallel submission members */
	struct {
		union {
			/**
			 * @child_list: parent's list of child contexts; needs
			 * no protection as it is immutable after context
			 * creation
			 */
			struct list_head child_list;
			/**
			 * @child_link: child's link into parent's list of
			 * children
			 */
			struct list_head child_link;
		};
		/** @parent: pointer to parent if child */
		struct intel_context *parent;
		/**
		 * @last_rq: last request submitted on a parallel context, used
		 * to insert submit fences between requests in the parallel
		 * context
		 */
		struct i915_request *last_rq;
		/**
		 * @fence_context: fence context of the composite fence used
		 * for parallel submission
		 */
		u64 fence_context;
		/**
		 * @seqno: seqno of the composite fence used for parallel
		 * submission
		 */
		u32 seqno;
		/** @number_children: number of children if parent */
		u8 number_children;
		/** @child_index: index into child_list if child */
		u8 child_index;
		/** @guc: GuC specific members for parallel submission */
		struct {
			/** @wqi_head: head pointer in work queue */
			u16 wqi_head;
			/** @wqi_tail: tail pointer in work queue */
			u16 wqi_tail;
			/**
			 * @parent_page: page in context state (ce->state) used
			 * by the parent for the work queue and process
			 * descriptor
			 */
			u8 parent_page;
		} guc;
	} parallel;

#ifdef CONFIG_DRM_I915_SELFTEST
	/**
	 * @drop_schedule_enable: Force drop of schedule enable G2H for selftest
	 */
	bool drop_schedule_enable;

	/**
	 * @drop_schedule_disable: Force drop of schedule disable G2H for
	 * selftest
	 */
	bool drop_schedule_disable;

	/**
	 * @drop_deregister: Force drop of deregister G2H for selftest
	 */
	bool drop_deregister;
#endif
};
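
/*
 * Illustrative sketch, not part of the driver: reference handling for the
 * kref/rcu union at the top of struct intel_context. The inline comment
 * there forbids kref_get_unless_zero(); one reason is that the refcount
 * shares storage with the RCU head used for deferred freeing, so a stale
 * pointer may no longer hold a valid kref. Callers instead take plain
 * references via the intel_context_get()/intel_context_put() helpers from
 * intel_context.h ("parent" below is a hypothetical context on which a
 * reference is already held):
 *
 *	struct intel_context *ce = intel_context_get(parent);
 *
 *	... submit work against ce ...
 *
 *	intel_context_put(ce);
 */

#endif /* __INTEL_CONTEXT_TYPES__ */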