// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
 */

#include <linux/kref.h>
#include <linux/uaccess.h>

#include "msm_gpu.h"

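/*
 * Final kref release for the per-file context: destroy and free any
 * scheduler entities that were lazily created for it, drop the reference
 * on the address space, and free the context itself.
 */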
void __msm_file_private_destroy(struct kref *kref)
{
	struct msm_file_private *ctx = container_of(kref,
		struct msm_file_private, ref);
	int i;

	for (i = 0; i < ARRAY_SIZE(ctx->entities); i++) {
		if (!ctx->entities[i])
			continue;

		drm_sched_entity_destroy(ctx->entities[i]);
		kfree(ctx->entities[i]);
	}

	msm_gem_address_space_put(ctx->aspace);
	kfree(ctx);
}

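/*
 * Final kref release for a submitqueue: tear down the fence idr, drop the
 * reference the queue holds on its owning context, and free the queue.
 */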
void msm_submitqueue_destroy(struct kref *kref)
{
	struct msm_gpu_submitqueue *queue = container_of(kref,
		struct msm_gpu_submitqueue, ref);

	idr_destroy(&queue->fence_idr);

	msm_file_private_put(queue->ctx);

	kfree(queue);
}

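/*
 * Look up a queue by id under the read lock, taking a reference on it if
 * found.  The caller is responsible for the matching msm_submitqueue_put().
 */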
struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
		u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return NULL;

	read_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			kref_get(&entry->ref);
			read_unlock(&ctx->queuelock);

			return entry;
		}
	}

	read_unlock(&ctx->queuelock);
	return NULL;
}

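/*
 * Called on file close: unlink every remaining queue from the context and
 * drop the list's reference on each one.
 */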
void msm_submitqueue_close(struct msm_file_private *ctx)
{
	struct msm_gpu_submitqueue *entry, *tmp;

	if (!ctx)
		return;

	/*
	 * No lock needed in close and there won't
	 * be any more user ioctls coming our way
	 */
	list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) {
		list_del(&entry->node);
		msm_submitqueue_put(entry);
	}
}

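/*
 * Find or lazily create the scheduler entity for a given (ring, priority)
 * slot.  Entities are shared by all of the context's queues that map to
 * the same slot; the static mutex serializes the create path.
 */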
static struct drm_sched_entity *
get_sched_entity(struct msm_file_private *ctx, struct msm_ringbuffer *ring,
		 unsigned ring_nr, enum drm_sched_priority sched_prio)
{
	static DEFINE_MUTEX(entity_lock);
	unsigned idx = (ring_nr * NR_SCHED_PRIORITIES) + sched_prio;

	/* We should have already validated that the requested priority is
	 * valid by the time we get here.
	 */
	if (WARN_ON(idx >= ARRAY_SIZE(ctx->entities)))
		return ERR_PTR(-EINVAL);

	mutex_lock(&entity_lock);

	if (!ctx->entities[idx]) {
		struct drm_sched_entity *entity;
		struct drm_gpu_scheduler *sched = &ring->sched;
		int ret;

		entity = kzalloc(sizeof(*ctx->entities[idx]), GFP_KERNEL);
		if (!entity) {
			mutex_unlock(&entity_lock);
			return ERR_PTR(-ENOMEM);
		}

		ret = drm_sched_entity_init(entity, sched_prio, &sched, 1, NULL);
		if (ret) {
			mutex_unlock(&entity_lock);
			kfree(entity);
			return ERR_PTR(ret);
		}

		ctx->entities[idx] = entity;
	}

	mutex_unlock(&entity_lock);

	return ctx->entities[idx];
}

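/*
 * Create a new submitqueue for the context: map the userspace priority to
 * a (ring, scheduler-priority) pair, find or create the matching scheduler
 * entity, and link the queue into the context's list under the write lock.
 */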
int msm_submitqueue_create(struct drm_device *drm, struct msm_file_private *ctx,
		u32 prio, u32 flags, u32 *id)
{
	struct msm_drm_private *priv = drm->dev_private;
	struct msm_gpu_submitqueue *queue;
	enum drm_sched_priority sched_prio;
	unsigned ring_nr;
	int ret;

	if (!ctx)
		return -ENODEV;

	if (!priv->gpu)
		return -ENODEV;

	ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio);
	if (ret)
		return ret;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);

	if (!queue)
		return -ENOMEM;

	kref_init(&queue->ref);
	queue->flags = flags;
	queue->ring_nr = ring_nr;

	queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr],
					 ring_nr, sched_prio);
	if (IS_ERR(queue->entity)) {
		ret = PTR_ERR(queue->entity);
		kfree(queue);
		return ret;
	}

	write_lock(&ctx->queuelock);

	queue->ctx = msm_file_private_get(ctx);
	queue->id = ctx->queueid++;

	if (id)
		*id = queue->id;

	idr_init(&queue->fence_idr);
	mutex_init(&queue->lock);

	list_add_tail(&queue->node, &ctx->submitqueues);

	write_unlock(&ctx->queuelock);

	return 0;
}

/*
 * Create the default submit-queue (id==0), used for backwards compatibility
 * for userspace that pre-dates the introduction of submitqueues.
 */
int msm_submitqueue_init(struct drm_device *drm, struct msm_file_private *ctx)
{
	struct msm_drm_private *priv = drm->dev_private;
	int default_prio, max_priority;

	if (!priv->gpu)
		return -ENODEV;

	max_priority = (priv->gpu->nr_rings * NR_SCHED_PRIORITIES) - 1;

	/*
	 * Pick a medium priority level as default. Lower numeric value is
	 * higher priority, so round-up to pick a priority that is not higher
	 * than the middle priority level.
	 */
	default_prio = DIV_ROUND_UP(max_priority, 2);

	return msm_submitqueue_create(drm, ctx, default_prio, 0, NULL);
}

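/*
 * MSM_SUBMITQUEUE_PARAM_FAULTS: copy the queue's fault count out to
 * userspace.  A zero-length request just reports the expected data size.
 */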
static int msm_submitqueue_query_faults(struct msm_gpu_submitqueue *queue,
		struct drm_msm_submitqueue_query *args)
{
	size_t size = min_t(size_t, args->len, sizeof(queue->faults));
	int ret;

	/* If a zero length was passed in, return the data size we expect */
	if (!args->len) {
		args->len = sizeof(queue->faults);
		return 0;
	}

	/* Set the length to the actual size of the data */
	args->len = size;

	ret = copy_to_user(u64_to_user_ptr(args->data), &queue->faults, size);

	return ret ? -EFAULT : 0;
}

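/*
 * SUBMITQUEUE_QUERY ioctl backend: look up the queue and dispatch on the
 * requested param; anything but MSM_SUBMITQUEUE_PARAM_FAULTS is -EINVAL.
 */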
int msm_submitqueue_query(struct drm_device *drm, struct msm_file_private *ctx,
		struct drm_msm_submitqueue_query *args)
{
	struct msm_gpu_submitqueue *queue;
	int ret = -EINVAL;

	if (args->pad)
		return -EINVAL;

	queue = msm_submitqueue_get(ctx, args->id);
	if (!queue)
		return -ENOENT;

	if (args->param == MSM_SUBMITQUEUE_PARAM_FAULTS)
		ret = msm_submitqueue_query_faults(queue, args);

	msm_submitqueue_put(queue);

	return ret;
}

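/*
 * SUBMITQUEUE_CLOSE ioctl backend: unlink and drop the queue matching the
 * given id.  The default queue (id 0) cannot be removed by userspace.
 */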
int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
{
	struct msm_gpu_submitqueue *entry;

	if (!ctx)
		return 0;

	/*
	 * id 0 is the "default" queue and can't be destroyed
	 * by the user
	 */
	if (!id)
		return -ENOENT;

	write_lock(&ctx->queuelock);

	list_for_each_entry(entry, &ctx->submitqueues, node) {
		if (entry->id == id) {
			list_del(&entry->node);
			write_unlock(&ctx->queuelock);

			msm_submitqueue_put(entry);
			return 0;
		}
	}

	write_unlock(&ctx->queuelock);
	return -ENOENT;
}