/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2019 Intel Corporation
 */

#ifndef __INTEL_GT_TYPES__
#define __INTEL_GT_TYPES__

#include <linux/ktime.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "uc/intel_uc.h"

#include "i915_vma.h"
#include "intel_engine_types.h"
#include "intel_gt_buffer_pool_types.h"
#include "intel_llc_types.h"
#include "intel_reset_types.h"
#include "intel_rc6_types.h"
#include "intel_rps_types.h"
#include "intel_migrate_types.h"
#include "intel_wakeref.h"
#include "pxp/intel_pxp_types.h"

struct drm_i915_private;
struct i915_ggtt;
struct intel_engine_cs;
struct intel_uncore;

struct intel_mmio_range {
	u32 start;
	u32 end;
};

/*
 * The hardware has multiple kinds of multicast register ranges that need
 * special register steering (and future platforms are expected to add
 * additional types).
 *
 * During driver startup, we initialize the steering control register to
 * direct reads to a slice/subslice that is valid for the 'subslice' class
 * of multicast registers. If another type of steering does not have any
 * overlap in valid steering targets with 'subslice' style registers, we will
 * need to explicitly re-steer reads of registers of the other type.
 *
 * Only the replication types that may need additional non-default steering
 * are listed here.
 */
enum intel_steering_type {
	L3BANK,
	MSLICE,
	LNCF,

	NUM_STEERING_TYPES
};

enum intel_submission_method {
	INTEL_SUBMISSION_RING,
	INTEL_SUBMISSION_ELSP,
	INTEL_SUBMISSION_GUC,
};

struct intel_gt {
	struct drm_i915_private *i915;
	struct intel_uncore *uncore;
	struct i915_ggtt *ggtt;

	struct intel_uc uc;

	struct i915_wa_list wa_list;

	struct intel_gt_timelines {
		spinlock_t lock; /* protects active_list */
		struct list_head active_list;
	} timelines;

	struct intel_gt_requests {
		/**
		 * We leave the user IRQ off as much as possible,
		 * but this means that requests will finish and never
		 * be retired once the system goes idle. Set a timer to
		 * fire periodically while the ring is running. When it
		 * fires, go retire requests.
		 */
		struct delayed_work retire_work;
	} requests;

	struct {
		struct llist_head list;
		struct work_struct work;
	} watchdog;

	struct intel_wakeref wakeref;
	atomic_t user_wakeref;

	struct list_head closed_vma;
	spinlock_t closed_lock; /* guards the list of closed_vma */

	ktime_t last_init_time;
	struct intel_reset reset;

	/**
	 * Is the GPU currently considered idle, or busy executing
	 * userspace requests? Whilst idle, we allow runtime power
	 * management to power down the hardware and display clocks.
	 * In order to reduce the effect on performance, there
	 * is a slight delay before we do so.
	 */
	intel_wakeref_t awake;

	u32 clock_frequency;
	u32 clock_period_ns;

	struct intel_llc llc;
	struct intel_rc6 rc6;
	struct intel_rps rps;

	spinlock_t irq_lock;
	u32 gt_imr;
	u32 pm_ier;
	u32 pm_imr;

	u32 pm_guc_events;

	struct {
		bool active;

		/**
		 * @lock: Lock protecting the fields below.
		 */
		seqcount_mutex_t lock;

		/**
		 * @total: Total time this engine was busy.
		 *
		 * Accumulated busy time, not counting the most recent
		 * period in cases where the engine is currently busy
		 * (active > 0).
		 */
		ktime_t total;

		/**
		 * @start: Timestamp of the last idle to active transition.
		 *
		 * Idle is defined as active == 0, active as active > 0.
		 */
		ktime_t start;
	} stats;

	struct intel_engine_cs *engine[I915_NUM_ENGINES];
	struct intel_engine_cs *engine_class[MAX_ENGINE_CLASS + 1]
					    [MAX_ENGINE_INSTANCE + 1];
	enum intel_submission_method submission_method;

	/*
	 * Default address space (either GGTT or ppGTT depending on arch).
	 *
	 * Reserved for exclusive use by the kernel.
	 */
	struct i915_address_space *vm;

	/*
	 * A pool of objects to use as shadow copies of client batch buffers
	 * when the command parser is enabled. Prevents the client from
	 * modifying the batch contents after software parsing.
	 *
	 * Buffers older than 1s are periodically reaped from the pool,
	 * or may be reclaimed by the shrinker before then.
	 */
	struct intel_gt_buffer_pool buffer_pool;

	struct i915_vma *scratch;

	struct intel_migrate migrate;

	const struct intel_mmio_range *steering_table[NUM_STEERING_TYPES];

	struct intel_gt_info {
		intel_engine_mask_t engine_mask;

		u32 l3bank_mask;

		u8 num_engines;

		/* General presence of SFC units */
		u8 sfc_mask;

		/* Media engine access to SFC per instance */
		u8 vdbox_sfc_access;

		/* Slice/subslice/EU info */
		struct sseu_dev_info sseu;

		unsigned long mslice_mask;
	} info;

	struct {
		u8 uc_index;
	} mocs;

	struct intel_pxp pxp;
};

enum intel_gt_scratch_field {
	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_DEFAULT = 0,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_RENDER_FLUSH = 128,

	/* 8 bytes */
	INTEL_GT_SCRATCH_FIELD_COHERENTL3_WA = 256,

	/* 6 * 8 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_CS_GPR = 2048,

	/* 4 bytes */
	INTEL_GT_SCRATCH_FIELD_PERF_PREDICATE_RESULT_1 = 2096,
};

#endif /* __INTEL_GT_TYPES__ */
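
/*
 * Illustrative sketch, not part of this header: how a reader might sample
 * the busyness stats declared above. The helper name is hypothetical, and
 * the use of ktime_get() for the in-progress period is an assumption made
 * for simplicity; the point is the total/start/active accounting under the
 * stats.lock seqcount, which lets readers retry instead of blocking writers.
 */
static inline ktime_t example_intel_gt_busy_time(struct intel_gt *gt)
{
	unsigned int seq;
	ktime_t total;

	do {
		seq = read_seqcount_begin(&gt->stats.lock);

		/* Time accumulated over already-completed busy periods. */
		total = gt->stats.total;

		/* Add the still-open period if the GT is busy right now. */
		if (gt->stats.active)
			total = ktime_add(total, ktime_sub(ktime_get(),
							   gt->stats.start));
	} while (read_seqcount_retry(&gt->stats.lock, seq));

	return total;
}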