/* SPDX-License-Identifier: MIT */
/*
 * Copyright © 2016 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#ifndef __I915_VMA_TYPES_H__
#define __I915_VMA_TYPES_H__

#include <linux/rbtree.h>

#include <drm/drm_mm.h>

#include "gem/i915_gem_object_types.h"

enum i915_cache_level;

/**
 * DOC: Global GTT views
 *
 * Background and previous state
 *
 * Historically objects could exist (be bound) in global GTT space only as
 * singular instances with a view representing all of the object's backing pages
 * in a linear fashion. This view will be called a normal view.
 *
 * To support multiple views of the same object, where the number of mapped
 * pages is not equal to the backing store, or where the layout of the pages
 * is not linear, the concept of a GGTT view was added.
 *
 * One example of an alternative view is a stereo display driven by a single
 * image. In this case we would have a framebuffer looking like this
 * (2x2 pages):
 *
 *    12
 *    34
 *
 * Above would represent a normal GGTT view as normally mapped for GPU or CPU
 * rendering. In contrast, fed to the display engine would be an alternative
 * view which could look something like this:
 *
 *    1212
 *    3434
 *
 * In this example both the size and layout of pages in the alternative view
 * are different from the normal view.
 *
 * Implementation and usage
 *
 * GGTT views are implemented using VMAs and are distinguished via enum
 * i915_ggtt_view_type and struct i915_ggtt_view.
 *
 * A new flavour of core GEM functions which work with GGTT bound objects was
 * added with the _ggtt_ infix, and sometimes with the _view postfix, to avoid
 * renaming in large amounts of code. They take the struct i915_ggtt_view
 * parameter encapsulating all metadata required to implement a view.
 *
 * As a helper for callers which are only interested in the normal view,
 * a globally const i915_ggtt_view_normal singleton instance exists. All old
 * core GEM API functions, the ones not taking the view parameter, operate on,
 * or with, the normal GGTT view.
 *
 * Code wanting to add or use a new GGTT view needs to:
 *
 * 1. Add a new enum with a suitable name.
 * 2. Extend the metadata in the i915_ggtt_view structure if required.
 * 3. Add support to i915_get_vma_pages().
 *
 * New views are required to build a scatter-gather table from within the
 * i915_get_vma_pages function. This table is stored in the vma.ggtt_view and
 * exists for the lifetime of a VMA.
 *
 * Core API is designed to have copy semantics, which means that a passed in
 * struct i915_ggtt_view does not need to be persistent (left around after
 * calling the core API functions).
 *
 */

struct intel_remapped_plane_info {
	/* in gtt pages */
	u32 offset;
	u16 width;
	u16 height;
	u16 src_stride;
	u16 dst_stride;
} __packed;

struct intel_remapped_info {
	struct intel_remapped_plane_info plane[4];
	/* in gtt pages */
	u32 plane_alignment;
} __packed;

struct intel_rotation_info {
	struct intel_remapped_plane_info plane[2];
} __packed;

struct intel_partial_info {
	u64 offset;
	unsigned int size;
} __packed;

enum i915_ggtt_view_type {
	I915_GGTT_VIEW_NORMAL = 0,
	I915_GGTT_VIEW_ROTATED = sizeof(struct intel_rotation_info),
	I915_GGTT_VIEW_PARTIAL = sizeof(struct intel_partial_info),
	I915_GGTT_VIEW_REMAPPED = sizeof(struct intel_remapped_info),
};
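
/*
 * Note: apart from NORMAL, each view type above is deliberately defined as
 * the sizeof() of its metadata struct. An illustrative sketch (not a
 * verbatim driver helper) of how code can exploit that, given two
 * struct i915_ggtt_view pointers a and b (the struct is defined further
 * below):
 *
 *	same_view = a->type == b->type &&
 *		    !memcmp(&a->partial, &b->partial, a->type);
 *
 * This only works because the metadata structs are __packed (no padding to
 * compare) and every type has a unique size, which is exactly what
 * assert_i915_gem_gtt_types() below checks at build time.
 */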

static inline void assert_i915_gem_gtt_types(void)
{
	BUILD_BUG_ON(sizeof(struct intel_rotation_info) != 2 * sizeof(u32) + 8 * sizeof(u16));
	BUILD_BUG_ON(sizeof(struct intel_partial_info) != sizeof(u64) + sizeof(unsigned int));
	BUILD_BUG_ON(sizeof(struct intel_remapped_info) != 5 * sizeof(u32) + 16 * sizeof(u16));

	/* Check that rotation/remapped share offsets for simplicity */
	BUILD_BUG_ON(offsetof(struct intel_remapped_info, plane[0]) !=
		     offsetof(struct intel_rotation_info, plane[0]));
	BUILD_BUG_ON(offsetofend(struct intel_remapped_info, plane[1]) !=
		     offsetofend(struct intel_rotation_info, plane[1]));

	/* As we encode the size of each branch inside the union into its type,
	 * we have to be careful that each branch has a unique size.
	 */
	switch ((enum i915_ggtt_view_type)0) {
	case I915_GGTT_VIEW_NORMAL:
	case I915_GGTT_VIEW_PARTIAL:
	case I915_GGTT_VIEW_ROTATED:
	case I915_GGTT_VIEW_REMAPPED:
		/* gcc complains if these are identical cases */
		break;
	}
}

struct i915_ggtt_view {
	enum i915_ggtt_view_type type;
	union {
		/* Members need to contain no holes/padding */
		struct intel_partial_info partial;
		struct intel_rotation_info rotated;
		struct intel_remapped_info remapped;
	};
};
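
/*
 * Illustrative example (a sketch, not driver code): since the core API has
 * copy semantics, a caller that is only interested in, say, the first 16
 * pages of an object can describe the view on the stack and discard it
 * immediately after the call (offset/size assumed to be in GTT pages,
 * matching the other view metadata above):
 *
 *	struct i915_ggtt_view view = {
 *		.type = I915_GGTT_VIEW_PARTIAL,
 *		.partial = { .offset = 0, .size = 16 },
 *	};
 *
 * Callers wanting the plain linear mapping use I915_GGTT_VIEW_NORMAL
 * (type zero) and leave the union untouched.
 */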

/**
 * DOC: Virtual Memory Address
 *
 * A VMA represents a GEM BO that is bound into an address space. Therefore, a
 * VMA's presence cannot be guaranteed before binding the object into the
 * address space, or after unbinding it from the address space.
 *
 * To make things as simple as possible (i.e. no refcounting), a VMA's lifetime
 * will always be <= an object's lifetime. So object refcounting should cover us.
 */
struct i915_vma {
	struct drm_mm_node node;

	struct i915_address_space *vm;
	const struct i915_vma_ops *ops;

	struct drm_i915_gem_object *obj;
	struct dma_resv *resv; /** Alias of obj->resv */

	struct sg_table *pages;
	void __iomem *iomap;
	void *private; /* owned by creator */

	struct i915_fence_reg *fence;

	u64 size;
	u64 display_alignment;
	struct i915_page_sizes page_sizes;

	/* mmap-offset associated with fencing for this vma */
	struct i915_mmap_offset *mmo;

	u32 fence_size;
	u32 fence_alignment;

	/**
	 * Count of the number of times this vma has been opened by different
	 * handles (but same file) for execbuf, i.e. the number of aliases
	 * that exist in the ctx->handle_vmas LUT for this vma.
	 */
	struct kref ref;
	atomic_t open_count;
	atomic_t flags;
	/**
	 * How many users have pinned this object in GTT space.
	 *
	 * This is a tightly bound, fairly small number of users, so we stuff
	 * it inside the flags field so that we can both check for overflow
	 * and detect a no-op i915_vma_pin() in a single check, while also
	 * pinning the vma.
	 *
	 * The worst case display setup would have the same vma pinned for
	 * use on each plane on each crtc, while also building the next atomic
	 * state and holding a pin for the length of the cleanup queue. In the
	 * future, the flip queue may be increased from 1.
	 * Estimated worst case: 3 [qlen] * 4 [max crtcs] * 7 [max planes] = 84
	 *
	 * For GEM, the number of concurrent users for pwrite/pread is
	 * unbounded. For execbuffer, it is currently one, but will in the
	 * future be extended to allow multiple clients to pin the vma
	 * concurrently.
	 *
	 * We also use suballocated pages, with each suballocation claiming
	 * its own pin on the shared vma. At present, this is limited to
	 * exclusive cachelines of a single page, so a maximum of 64 possible
	 * users.
	 */
#define I915_VMA_PIN_MASK 0x3ff
#define I915_VMA_OVERFLOW 0x200

	/** Flags and address space this VMA is bound to */
#define I915_VMA_GLOBAL_BIND_BIT 10
#define I915_VMA_LOCAL_BIND_BIT 11

#define I915_VMA_GLOBAL_BIND ((int)BIT(I915_VMA_GLOBAL_BIND_BIT))
#define I915_VMA_LOCAL_BIND ((int)BIT(I915_VMA_LOCAL_BIND_BIT))

#define I915_VMA_BIND_MASK (I915_VMA_GLOBAL_BIND | I915_VMA_LOCAL_BIND)

#define I915_VMA_ALLOC_BIT 12

#define I915_VMA_ERROR_BIT 13
#define I915_VMA_ERROR ((int)BIT(I915_VMA_ERROR_BIT))

#define I915_VMA_GGTT_BIT 14
#define I915_VMA_CAN_FENCE_BIT 15
#define I915_VMA_USERFAULT_BIT 16
#define I915_VMA_GGTT_WRITE_BIT 17

#define I915_VMA_GGTT ((int)BIT(I915_VMA_GGTT_BIT))
#define I915_VMA_CAN_FENCE ((int)BIT(I915_VMA_CAN_FENCE_BIT))
#define I915_VMA_USERFAULT ((int)BIT(I915_VMA_USERFAULT_BIT))
#define I915_VMA_GGTT_WRITE ((int)BIT(I915_VMA_GGTT_WRITE_BIT))

#define I915_VMA_SCANOUT_BIT 18
#define I915_VMA_SCANOUT ((int)BIT(I915_VMA_SCANOUT_BIT))
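
	/*
	 * Illustrative sketch (not a driver helper): the pin count lives in
	 * the low bits of @flags and the bind/status bits sit above
	 * I915_VMA_PIN_MASK, so one atomic_read() observes both at once:
	 *
	 *	unsigned int flags = atomic_read(&vma->flags);
	 *	unsigned int pin_count = flags & I915_VMA_PIN_MASK;
	 *	bool bound = flags & I915_VMA_BIND_MASK;
	 *
	 * This single-word encoding is what lets i915_vma_pin() detect a
	 * no-op (already bound and pinned) and check for overflow while
	 * pinning, as described in the comment above.
	 */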

	struct i915_active active;

#define I915_VMA_PAGES_BIAS 24
#define I915_VMA_PAGES_ACTIVE (BIT(24) | 1)
	atomic_t pages_count; /* number of active binds to the pages */
	struct mutex pages_mutex; /* protect acquire/release of backing pages */

	/**
	 * Support different GGTT views into the same object.
	 * This means there can be multiple VMA mappings per object and per VM.
	 * i915_ggtt_view_type is used to distinguish between those entries.
	 * The default of zero (I915_GGTT_VIEW_NORMAL) is also the view assumed
	 * in GEM functions which take no ggtt view parameter.
	 */
	struct i915_ggtt_view ggtt_view;

	/** This object's place on the active/inactive lists */
	struct list_head vm_link;

	struct list_head obj_link; /* Link in the object's VMA list */
	struct rb_node obj_node;
	struct hlist_node obj_hash;

	/** This vma's place in the eviction list */
	struct list_head evict_link;

	struct list_head closed_link;
};

#endif