// SPDX-License-Identifier: GPL-2.0+
/*
 * virtio-snd: Virtio sound device
 * Copyright (C) 2021 OpenSynergy GmbH
 */
#include <sound/pcm_params.h>

#include "virtio_card.h"

/**
 * struct virtio_pcm_msg - VirtIO I/O message.
 * @substream: VirtIO PCM substream.
 * @xfer: Request header payload.
 * @status: Response header payload.
 * @length: Data length in bytes.
 * @sgs: Payload scatter-gather table.
 */
struct virtio_pcm_msg {
	struct virtio_pcm_substream *substream;
	struct virtio_snd_pcm_xfer xfer;
	struct virtio_snd_pcm_status status;
	size_t length;
	struct scatterlist sgs[];
};

/**
 * enum pcm_msg_sg_index - Index values for the virtio_pcm_msg->sgs field in
 *                         an I/O message.
 * @PCM_MSG_SG_XFER: Element containing a virtio_snd_pcm_xfer structure.
 * @PCM_MSG_SG_STATUS: Element containing a virtio_snd_pcm_status structure.
 * @PCM_MSG_SG_DATA: The first element containing a data buffer.
 */
enum pcm_msg_sg_index {
	PCM_MSG_SG_XFER = 0,
	PCM_MSG_SG_STATUS,
	PCM_MSG_SG_DATA
};
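
/*
 * Illustrative sketch (not part of the device protocol definition): for a
 * single I/O message, virtsnd_pcm_msg_alloc() below lays out the sg-table as
 *
 *   sgs[PCM_MSG_SG_XFER]     -> &msg->xfer   (request header)
 *   sgs[PCM_MSG_SG_STATUS]   -> &msg->status (response header)
 *   sgs[PCM_MSG_SG_DATA + i] -> i-th physically contiguous chunk of one
 *                               period of the vmalloc'ed DMA buffer
 */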

/**
 * virtsnd_pcm_sg_num() - Count the number of sg-elements required to represent
 *                        a vmalloc'ed buffer.
 * @data: Pointer to vmalloc'ed buffer.
 * @length: Buffer size.
 *
 * Context: Any context.
 * Return: Number of physically contiguous parts in @data.
 */
static int virtsnd_pcm_sg_num(u8 *data, unsigned int length)
{
	phys_addr_t sg_address;
	unsigned int sg_length;
	int num = 0;

	while (length) {
		struct page *pg = vmalloc_to_page(data);
		phys_addr_t pg_address = page_to_phys(pg);
		size_t pg_length;

		pg_length = PAGE_SIZE - offset_in_page(data);
		if (pg_length > length)
			pg_length = length;

		if (!num || sg_address + sg_length != pg_address) {
			sg_address = pg_address;
			sg_length = pg_length;
			num++;
		} else {
			sg_length += pg_length;
		}

		data += pg_length;
		length -= pg_length;
	}

	return num;
}
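
/*
 * Example (illustrative only): for a period spanning three pages of a
 * vmalloc'ed buffer, virtsnd_pcm_sg_num() returns 3 if none of the backing
 * pages are physically adjacent, and 2 if, say, the first two pages happen
 * to be physically contiguous.
 */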

/**
 * virtsnd_pcm_sg_from() - Build an sg-list from a vmalloc'ed buffer.
 * @sgs: Preallocated sg-list to populate.
 * @nsgs: The maximum number of elements in @sgs.
 * @data: Pointer to vmalloc'ed buffer.
 * @length: Buffer size.
 *
 * Splits the buffer into physically contiguous parts and makes an sg-list of
 * such parts.
 *
 * Context: Any context.
 */
static void virtsnd_pcm_sg_from(struct scatterlist *sgs, int nsgs, u8 *data,
				unsigned int length)
{
	int idx = -1;

	while (length) {
		struct page *pg = vmalloc_to_page(data);
		size_t pg_length;

		pg_length = PAGE_SIZE - offset_in_page(data);
		if (pg_length > length)
			pg_length = length;

		if (idx == -1 ||
		    sg_phys(&sgs[idx]) + sgs[idx].length != page_to_phys(pg)) {
			if (idx + 1 == nsgs)
				break;
			sg_set_page(&sgs[++idx], pg, pg_length,
				    offset_in_page(data));
		} else {
			sgs[idx].length += pg_length;
		}

		data += pg_length;
		length -= pg_length;
	}

	sg_mark_end(&sgs[idx]);
}
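
/*
 * Note: @nsgs is expected to come from virtsnd_pcm_sg_num() on the same
 * buffer; with a smaller value the tail of the buffer would be silently
 * dropped. A minimal usage sketch (assuming a zero-initialized sg array, as
 * in virtsnd_pcm_msg_alloc() below):
 *
 *	int nsgs = virtsnd_pcm_sg_num(data, length);
 *	struct scatterlist *sgs = kcalloc(nsgs, sizeof(*sgs), GFP_KERNEL);
 *
 *	if (sgs)
 *		virtsnd_pcm_sg_from(sgs, nsgs, data, length);
 */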

/**
 * virtsnd_pcm_msg_alloc() - Allocate I/O messages.
 * @vss: VirtIO PCM substream.
 * @periods: Current number of periods.
 * @period_bytes: Current period size in bytes.
 *
 * The function slices the buffer into @periods parts (each with the size of
 * @period_bytes), and creates @periods corresponding I/O messages.
 *
 * Context: Any context that permits sleeping.
 * Return: 0 on success, -ENOMEM on failure.
 */
int virtsnd_pcm_msg_alloc(struct virtio_pcm_substream *vss,
			  unsigned int periods, unsigned int period_bytes)
{
	struct snd_pcm_runtime *runtime = vss->substream->runtime;
	unsigned int i;

	vss->msgs = kcalloc(periods, sizeof(*vss->msgs), GFP_KERNEL);
	if (!vss->msgs)
		return -ENOMEM;

	vss->nmsgs = periods;

	for (i = 0; i < periods; ++i) {
		u8 *data = runtime->dma_area + period_bytes * i;
		int sg_num = virtsnd_pcm_sg_num(data, period_bytes);
		struct virtio_pcm_msg *msg;

		msg = kzalloc(struct_size(msg, sgs, sg_num + 2), GFP_KERNEL);
		if (!msg)
			return -ENOMEM;

		msg->substream = vss;
		sg_init_one(&msg->sgs[PCM_MSG_SG_XFER], &msg->xfer,
			    sizeof(msg->xfer));
		sg_init_one(&msg->sgs[PCM_MSG_SG_STATUS], &msg->status,
			    sizeof(msg->status));
		msg->length = period_bytes;
		virtsnd_pcm_sg_from(&msg->sgs[PCM_MSG_SG_DATA], sg_num, data,
				    period_bytes);

		vss->msgs[i] = msg;
	}

	return 0;
}
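
/*
 * Hedged usage sketch: the hw_params path of the driver is expected to call
 * this with the parameters negotiated by ALSA, roughly as below (hw_params,
 * rc and the error handling shown here are illustrative, not a verbatim copy
 * of the actual caller):
 *
 *	rc = virtsnd_pcm_msg_alloc(vss, params_periods(hw_params),
 *				   params_period_bytes(hw_params));
 *	if (rc)
 *		virtsnd_pcm_msg_free(vss);
 */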

/**
 * virtsnd_pcm_msg_free() - Free all allocated I/O messages.
 * @vss: VirtIO PCM substream.
 *
 * Context: Any context.
 */
void virtsnd_pcm_msg_free(struct virtio_pcm_substream *vss)
{
	unsigned int i;

	for (i = 0; vss->msgs && i < vss->nmsgs; ++i)
		kfree(vss->msgs[i]);
	kfree(vss->msgs);

	vss->msgs = NULL;
	vss->nmsgs = 0;
}

/**
 * virtsnd_pcm_msg_send() - Send asynchronous I/O messages.
 * @vss: VirtIO PCM substream.
 *
 * All messages are organized in an ordered circular list. Each time the
 * function is called, all currently non-enqueued messages are added to the
 * virtqueue. For this, the function keeps track of two values:
 *
 *   msg_last_enqueued = index of the last enqueued message,
 *   msg_count = number of pending messages in the virtqueue.
 *
 * Context: Any context. Expects the tx/rx queue and the VirtIO substream
 *          spinlocks to be held by the caller.
 * Return: 0 on success, -errno on failure.
 */
int virtsnd_pcm_msg_send(struct virtio_pcm_substream *vss)
{
	struct snd_pcm_runtime *runtime = vss->substream->runtime;
	struct virtio_snd *snd = vss->snd;
	struct virtio_device *vdev = snd->vdev;
	struct virtqueue *vqueue = virtsnd_pcm_queue(vss)->vqueue;
	int i;
	int n;
	bool notify = false;

	i = (vss->msg_last_enqueued + 1) % runtime->periods;
	n = runtime->periods - vss->msg_count;

	for (; n; --n, i = (i + 1) % runtime->periods) {
		struct virtio_pcm_msg *msg = vss->msgs[i];
		struct scatterlist *psgs[] = {
			&msg->sgs[PCM_MSG_SG_XFER],
			&msg->sgs[PCM_MSG_SG_DATA],
			&msg->sgs[PCM_MSG_SG_STATUS]
		};
		int rc;

		msg->xfer.stream_id = cpu_to_le32(vss->sid);
		memset(&msg->status, 0, sizeof(msg->status));

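		/*
		 * The descriptor layout differs by direction: for playback
		 * the header and the data pages are device-readable (2 out
		 * sg-lists) and only the status is device-writable (1 in
		 * sg-list); for capture the device writes both the data and
		 * the status (1 out, 2 in).
		 */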
		if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK)
			rc = virtqueue_add_sgs(vqueue, psgs, 2, 1, msg,
					       GFP_ATOMIC);
		else
			rc = virtqueue_add_sgs(vqueue, psgs, 1, 2, msg,
					       GFP_ATOMIC);

		if (rc) {
			dev_err(&vdev->dev,
				"SID %u: failed to send I/O message\n",
				vss->sid);
			return rc;
		}

		vss->msg_last_enqueued = i;
		vss->msg_count++;
	}

	if (!(vss->features & (1U << VIRTIO_SND_PCM_F_MSG_POLLING)))
		notify = virtqueue_kick_prepare(vqueue);

	if (notify)
		virtqueue_notify(vqueue);

	return 0;
}
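
/*
 * Hedged sketch of a caller honouring the locking contract documented above
 * (the exact lock ordering used elsewhere in the driver is not shown in this
 * file and is therefore an assumption here):
 *
 *	struct virtio_snd_queue *queue = virtsnd_pcm_queue(vss);
 *	unsigned long flags;
 *	int rc;
 *
 *	spin_lock_irqsave(&queue->lock, flags);
 *	spin_lock(&vss->lock);
 *	rc = virtsnd_pcm_msg_send(vss);
 *	spin_unlock(&vss->lock);
 *	spin_unlock_irqrestore(&queue->lock, flags);
 */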

/**
 * virtsnd_pcm_msg_pending_num() - Returns the number of pending I/O messages.
 * @vss: VirtIO substream.
 *
 * Context: Any context.
 * Return: Number of messages.
 */
unsigned int virtsnd_pcm_msg_pending_num(struct virtio_pcm_substream *vss)
{
	unsigned int num;
	unsigned long flags;

	spin_lock_irqsave(&vss->lock, flags);
	num = vss->msg_count;
	spin_unlock_irqrestore(&vss->lock, flags);

	return num;
}

/**
 * virtsnd_pcm_msg_complete() - Complete an I/O message.
 * @msg: I/O message.
 * @written_bytes: Number of bytes written to the message.
 *
 * Completion of the message means that a period has elapsed. If transmission
 * is allowed, then each completed message is immediately placed back at the
 * end of the queue.
 *
 * For the playback substream, @written_bytes is equal to sizeof(msg->status).
 *
 * For the capture substream, @written_bytes is equal to sizeof(msg->status)
 * plus the number of captured bytes.
 *
 * Context: Interrupt context. Takes and releases the VirtIO substream spinlock.
 */
static void virtsnd_pcm_msg_complete(struct virtio_pcm_msg *msg,
				     size_t written_bytes)
{
	struct virtio_pcm_substream *vss = msg->substream;

	/*
	 * hw_ptr always indicates the buffer position of the first I/O message
	 * in the virtqueue. Therefore, on each completion of an I/O message,
	 * the hw_ptr value is unconditionally advanced.
	 */
	spin_lock(&vss->lock);
	/*
	 * If the capture substream returned an incorrect status, then just
	 * increase the hw_ptr by the message size.
	 */
	if (vss->direction == SNDRV_PCM_STREAM_PLAYBACK ||
	    written_bytes <= sizeof(msg->status))
		vss->hw_ptr += msg->length;
	else
		vss->hw_ptr += written_bytes - sizeof(msg->status);

	if (vss->hw_ptr >= vss->buffer_bytes)
		vss->hw_ptr -= vss->buffer_bytes;

	vss->xfer_xrun = false;
	vss->msg_count--;

	if (vss->xfer_enabled) {
		struct snd_pcm_runtime *runtime = vss->substream->runtime;

		runtime->delay =
			bytes_to_frames(runtime,
					le32_to_cpu(msg->status.latency_bytes));

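		/*
		 * The period-elapsed notification is deferred to a work item,
		 * presumably so that snd_pcm_period_elapsed() is not invoked
		 * from interrupt context with the substream spinlock held.
		 */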
		schedule_work(&vss->elapsed_period);

		virtsnd_pcm_msg_send(vss);
	} else if (!vss->msg_count) {
		wake_up_all(&vss->msg_empty);
	}
	spin_unlock(&vss->lock);
}

/**
 * virtsnd_pcm_notify_cb() - Process all completed I/O messages.
 * @queue: Underlying tx/rx queue.
 *
 * Context: Interrupt context. Takes and releases the tx/rx queue spinlock.
 */
static inline void virtsnd_pcm_notify_cb(struct virtio_snd_queue *queue)
{
	struct virtio_pcm_msg *msg;
	u32 written_bytes;
	unsigned long flags;

	spin_lock_irqsave(&queue->lock, flags);
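	/*
	 * Standard virtio completion pattern: drain the virtqueue with
	 * callbacks disabled, then re-enable them; virtqueue_enable_cb()
	 * returns false if more buffers arrived in the meantime, in which
	 * case the loop drains again to avoid missing a completion.
	 */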
	do {
		virtqueue_disable_cb(queue->vqueue);
		while ((msg = virtqueue_get_buf(queue->vqueue, &written_bytes)))
			virtsnd_pcm_msg_complete(msg, written_bytes);
		if (unlikely(virtqueue_is_broken(queue->vqueue)))
			break;
	} while (!virtqueue_enable_cb(queue->vqueue));
	spin_unlock_irqrestore(&queue->lock, flags);
}

/**
 * virtsnd_pcm_tx_notify_cb() - Process all completed TX messages.
 * @vqueue: Underlying tx virtqueue.
 *
 * Context: Interrupt context.
 */
void virtsnd_pcm_tx_notify_cb(struct virtqueue *vqueue)
{
	struct virtio_snd *snd = vqueue->vdev->priv;

	virtsnd_pcm_notify_cb(virtsnd_tx_queue(snd));
}

/**
 * virtsnd_pcm_rx_notify_cb() - Process all completed RX messages.
 * @vqueue: Underlying rx virtqueue.
 *
 * Context: Interrupt context.
 */
void virtsnd_pcm_rx_notify_cb(struct virtqueue *vqueue)
{
	struct virtio_snd *snd = vqueue->vdev->priv;

	virtsnd_pcm_notify_cb(virtsnd_rx_queue(snd));
}

/**
 * virtsnd_pcm_ctl_msg_alloc() - Allocate and initialize the PCM device control
 *                               message for the specified substream.
 * @vss: VirtIO PCM substream.
 * @command: Control request code (VIRTIO_SND_R_PCM_XXX).
 * @gfp: Kernel flags for memory allocation.
 *
 * Context: Any context. May sleep if @gfp flags permit.
 * Return: Allocated message on success, NULL on failure.
 */
struct virtio_snd_msg *
virtsnd_pcm_ctl_msg_alloc(struct virtio_pcm_substream *vss,
			  unsigned int command, gfp_t gfp)
{
	size_t request_size = sizeof(struct virtio_snd_pcm_hdr);
	size_t response_size = sizeof(struct virtio_snd_hdr);
	struct virtio_snd_msg *msg;

	switch (command) {
	case VIRTIO_SND_R_PCM_SET_PARAMS:
		request_size = sizeof(struct virtio_snd_pcm_set_params);
		break;
	}

	msg = virtsnd_ctl_msg_alloc(request_size, response_size, gfp);
	if (msg) {
		struct virtio_snd_pcm_hdr *hdr = virtsnd_ctl_msg_request(msg);

		hdr->hdr.code = cpu_to_le32(command);
		hdr->stream_id = cpu_to_le32(vss->sid);
	}

	return msg;
}
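
/*
 * Hedged usage sketch: a control request for this substream would typically
 * be allocated here and then handed to the driver's control-queue helper
 * (virtsnd_ctl_msg_send_sync() is assumed to be the synchronous send helper
 * declared in virtio_card.h):
 *
 *	struct virtio_snd_msg *msg;
 *
 *	msg = virtsnd_pcm_ctl_msg_alloc(vss, VIRTIO_SND_R_PCM_PREPARE,
 *					GFP_KERNEL);
 *	if (!msg)
 *		return -ENOMEM;
 *
 *	return virtsnd_ctl_msg_send_sync(vss->snd, msg);
 */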