// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2018-2020 Broadcom.
 */

#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/hash.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/sizes.h>
#include <linux/spinlock.h>
#include <linux/timer.h>

#include "bcm_vk.h"
#include "bcm_vk_msg.h"
#include "bcm_vk_sg.h"

/* functions to manipulate the transport id in msg block */
#define BCM_VK_MSG_Q_SHIFT   4
#define BCM_VK_MSG_Q_MASK    0xF
#define BCM_VK_MSG_ID_MASK   0xFFF
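/*
 * Resulting layout of trans_id implied by the masks above (illustrative
 * example only):
 *   bits [3:0]  - queue number
 *   bits [15:4] - message id
 * e.g. trans_id 0x0A35 -> q_num = 0x5, msg_id = 0x0A3
 */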

#define BCM_VK_DMA_DRAIN_MAX_MS 2000

/* this number times q_size gives the max number of msgs processed per loop */
#define BCM_VK_MSG_PROC_MAX_LOOP 2
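
/*
 * For illustration: bcm_to_h_msg_dequeue() uses this as
 * max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * q_size, i.e. at most
 * 2 * q_size messages are handled per queue in one dequeue pass.
 */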

/* module parameters */
static bool hb_mon = true;
module_param(hb_mon, bool, 0444);
MODULE_PARM_DESC(hb_mon, "Monitoring heartbeat continuously.\n");
static int batch_log = 1;
module_param(batch_log, int, 0444);
MODULE_PARM_DESC(batch_log, "Max num of logs per batch operation.\n");

static bool hb_mon_is_on(void)
{
        return hb_mon;
}

static u32 get_q_num(const struct vk_msg_blk *msg)
{
        u32 q_num = msg->trans_id & BCM_VK_MSG_Q_MASK;

        if (q_num >= VK_MSGQ_PER_CHAN_MAX)
                q_num = VK_MSGQ_NUM_DEFAULT;
        return q_num;
}

static void set_q_num(struct vk_msg_blk *msg, u32 q_num)
{
        u32 trans_q;

        if (q_num >= VK_MSGQ_PER_CHAN_MAX)
                trans_q = VK_MSGQ_NUM_DEFAULT;
        else
                trans_q = q_num;

        msg->trans_id = (msg->trans_id & ~BCM_VK_MSG_Q_MASK) | trans_q;
}

static u32 get_msg_id(const struct vk_msg_blk *msg)
{
        return ((msg->trans_id >> BCM_VK_MSG_Q_SHIFT) & BCM_VK_MSG_ID_MASK);
}

static void set_msg_id(struct vk_msg_blk *msg, u32 val)
{
        msg->trans_id = (val << BCM_VK_MSG_Q_SHIFT) | get_q_num(msg);
}

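/*
 * The ring helpers below assume q_size is a power of 2 (q_mask is set to
 * q_size - 1 in bcm_vk_sync_msgq()), so indices wrap by simple masking.
 * One block is always left unused so that a full queue can be told apart
 * from an empty one; hence the "- 1" in msgq_avail_space().
 */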
static u32 msgq_inc(const struct bcm_vk_sync_qinfo *qinfo, u32 idx, u32 inc)
{
        return ((idx + inc) & qinfo->q_mask);
}

static
struct vk_msg_blk __iomem *msgq_blk_addr(const struct bcm_vk_sync_qinfo *qinfo,
                                         u32 idx)
{
        return qinfo->q_start + (VK_MSGQ_BLK_SIZE * idx);
}

static u32 msgq_occupied(const struct bcm_vk_msgq __iomem *msgq,
                         const struct bcm_vk_sync_qinfo *qinfo)
{
        u32 wr_idx, rd_idx;

        wr_idx = readl_relaxed(&msgq->wr_idx);
        rd_idx = readl_relaxed(&msgq->rd_idx);

        return ((wr_idx - rd_idx) & qinfo->q_mask);
}

static
u32 msgq_avail_space(const struct bcm_vk_msgq __iomem *msgq,
                     const struct bcm_vk_sync_qinfo *qinfo)
{
        return (qinfo->q_size - msgq_occupied(msgq, qinfo) - 1);
}

/* number of retries when enqueueing a message fails before returning EAGAIN */
#define BCM_VK_H2VK_ENQ_RETRY 10
#define BCM_VK_H2VK_ENQ_RETRY_DELAY_MS 50
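
/*
 * Worst case, an enqueue attempt therefore waits roughly
 * BCM_VK_H2VK_ENQ_RETRY * BCM_VK_H2VK_ENQ_RETRY_DELAY_MS = 500 ms
 * before -EAGAIN is returned to the caller.
 */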

bool bcm_vk_drv_access_ok(struct bcm_vk *vk)
{
        return (!!atomic_read(&vk->msgq_inited));
}

void bcm_vk_set_host_alert(struct bcm_vk *vk, u32 bit_mask)
{
        struct bcm_vk_alert *alert = &vk->host_alert;
        unsigned long flags;

        /* use the irqsave version as this may be called inside a timer interrupt */
        spin_lock_irqsave(&vk->host_alert_lock, flags);
        alert->notfs |= bit_mask;
        spin_unlock_irqrestore(&vk->host_alert_lock, flags);

        if (test_and_set_bit(BCM_VK_WQ_NOTF_PEND, vk->wq_offload) == 0)
                queue_work(vk->wq_thread, &vk->wq_work);
}

/*
 * Heartbeat related defines
 * The heartbeat from the host is a last resort. If a stuck condition happens
 * on the card, firmware is supposed to detect it. Therefore, the heartbeat
 * values used by the driver are more relaxed and need to be bigger
 * than the watchdog timeout on the card. The watchdog timeout on the card
 * is 20s, with a jitter of 2s => 22s. We use a value of 27s here.
 */
#define BCM_VK_HB_TIMER_S 3
#define BCM_VK_HB_TIMER_VALUE (BCM_VK_HB_TIMER_S * HZ)
#define BCM_VK_HB_LOST_MAX (27 / BCM_VK_HB_TIMER_S)
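
/*
 * With a 3 s poll interval, BCM_VK_HB_LOST_MAX = 27 / 3 = 9 missed polls
 * (27 s); the alert fires once this count is exceeded, which is above the
 * card's 22 s watchdog-plus-jitter window.
 */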

static void bcm_vk_hb_poll(struct timer_list *t)
{
        u32 uptime_s;
        struct bcm_vk_hb_ctrl *hb = container_of(t, struct bcm_vk_hb_ctrl,
                                                 timer);
        struct bcm_vk *vk = container_of(hb, struct bcm_vk, hb_ctrl);

        if (bcm_vk_drv_access_ok(vk) && hb_mon_is_on()) {
                /* read uptime from register and compare */
                uptime_s = vkread32(vk, BAR_0, BAR_OS_UPTIME);

                if (uptime_s == hb->last_uptime)
                        hb->lost_cnt++;
                else /* reset to avoid accumulation */
                        hb->lost_cnt = 0;

                dev_dbg(&vk->pdev->dev, "Last uptime %d current %d, lost %d\n",
                        hb->last_uptime, uptime_s, hb->lost_cnt);

                /*
                 * if the interface goes down without any activity, a value
                 * of 0xFFFFFFFF will be continuously read, and detection
                 * will eventually happen.
                 */
                hb->last_uptime = uptime_s;
        } else {
                /* reset heart beat lost cnt */
                hb->lost_cnt = 0;
        }

        /* next, check if heartbeat exceeds limit */
        if (hb->lost_cnt > BCM_VK_HB_LOST_MAX) {
                dev_err(&vk->pdev->dev, "Heartbeat Misses %d times, %d s!\n",
                        BCM_VK_HB_LOST_MAX,
                        BCM_VK_HB_LOST_MAX * BCM_VK_HB_TIMER_S);

                bcm_vk_blk_drv_access(vk);
                bcm_vk_set_host_alert(vk, ERR_LOG_HOST_HB_FAIL);
        }
        /* re-arm timer */
        mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
}

void bcm_vk_hb_init(struct bcm_vk *vk)
{
        struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;

        timer_setup(&hb->timer, bcm_vk_hb_poll, 0);
        mod_timer(&hb->timer, jiffies + BCM_VK_HB_TIMER_VALUE);
}

void bcm_vk_hb_deinit(struct bcm_vk *vk)
{
        struct bcm_vk_hb_ctrl *hb = &vk->hb_ctrl;

        del_timer(&hb->timer);
}

static void bcm_vk_msgid_bitmap_clear(struct bcm_vk *vk,
                                      unsigned int start,
                                      unsigned int nbits)
{
        spin_lock(&vk->msg_id_lock);
        bitmap_clear(vk->bmap, start, nbits);
        spin_unlock(&vk->msg_id_lock);
}

/*
 * allocate a ctx per file struct
 */
static struct bcm_vk_ctx *bcm_vk_get_ctx(struct bcm_vk *vk, const pid_t pid)
{
        u32 i;
        struct bcm_vk_ctx *ctx = NULL;
        u32 hash_idx = hash_32(pid, VK_PID_HT_SHIFT_BIT);

        spin_lock(&vk->ctx_lock);

        /* check if it is in reset, if so, don't allow */
        if (vk->reset_pid) {
                dev_err(&vk->pdev->dev,
                        "No context allowed during reset by pid %d\n",
                        vk->reset_pid);

                goto in_reset_exit;
        }

        for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
                if (!vk->ctx[i].in_use) {
                        vk->ctx[i].in_use = true;
                        ctx = &vk->ctx[i];
                        break;
                }
        }

        if (!ctx) {
                dev_err(&vk->pdev->dev, "All context in use\n");

                goto all_in_use_exit;
        }

        /* set the pid and insert it to hash table */
        ctx->pid = pid;
        ctx->hash_idx = hash_idx;
        list_add_tail(&ctx->node, &vk->pid_ht[hash_idx].head);

        /* increase kref */
        kref_get(&vk->kref);

        /* clear counter */
        atomic_set(&ctx->pend_cnt, 0);
        atomic_set(&ctx->dma_cnt, 0);
        init_waitqueue_head(&ctx->rd_wq);

all_in_use_exit:
in_reset_exit:
        spin_unlock(&vk->ctx_lock);

        return ctx;
}

static u16 bcm_vk_get_msg_id(struct bcm_vk *vk)
{
        u16 rc = VK_MSG_ID_OVERFLOW;
        u16 test_bit_count = 0;

        spin_lock(&vk->msg_id_lock);
        while (test_bit_count < (VK_MSG_ID_BITMAP_SIZE - 1)) {
                /*
                 * The first time through this loop, msg_id will be 0
                 * and the first one tested will be 1. We skip
                 * VK_SIMPLEX_MSG_ID (0), which is reserved for one-way
                 * host2vk communication.
                 */
                vk->msg_id++;
                if (vk->msg_id == VK_MSG_ID_BITMAP_SIZE)
                        vk->msg_id = 1;

                if (test_bit(vk->msg_id, vk->bmap)) {
                        test_bit_count++;
                        continue;
                }
                rc = vk->msg_id;
                bitmap_set(vk->bmap, vk->msg_id, 1);
                break;
        }
        spin_unlock(&vk->msg_id_lock);

        return rc;
}

static int bcm_vk_free_ctx(struct bcm_vk *vk, struct bcm_vk_ctx *ctx)
{
        u32 idx;
        u32 hash_idx;
        pid_t pid;
        struct bcm_vk_ctx *entry;
        int count = 0;

        if (!ctx) {
                dev_err(&vk->pdev->dev, "NULL context detected\n");
                return -EINVAL;
        }
        idx = ctx->idx;
        pid = ctx->pid;

        spin_lock(&vk->ctx_lock);

        if (!vk->ctx[idx].in_use) {
                dev_err(&vk->pdev->dev, "context[%d] not in use!\n", idx);
        } else {
                vk->ctx[idx].in_use = false;
                vk->ctx[idx].miscdev = NULL;

                /* Remove it from hash list and see if it is the last one. */
                list_del(&ctx->node);
                hash_idx = ctx->hash_idx;
                list_for_each_entry(entry, &vk->pid_ht[hash_idx].head, node) {
                        if (entry->pid == pid)
                                count++;
                }
        }

        spin_unlock(&vk->ctx_lock);

        return count;
}

static void bcm_vk_free_wkent(struct device *dev, struct bcm_vk_wkent *entry)
{
        int proc_cnt;

        bcm_vk_sg_free(dev, entry->dma, VK_DMA_MAX_ADDRS, &proc_cnt);
        if (proc_cnt)
                atomic_dec(&entry->ctx->dma_cnt);

        kfree(entry->to_h_msg);
        kfree(entry);
}

static void bcm_vk_drain_all_pend(struct device *dev,
                                  struct bcm_vk_msg_chan *chan,
                                  struct bcm_vk_ctx *ctx)
{
        u32 num;
        struct bcm_vk_wkent *entry, *tmp;
        struct bcm_vk *vk;
        struct list_head del_q;

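        /*
         * Note: vk is dereferenced below only when a specific ctx is given,
         * so it is intentionally left unset for the ctx == NULL
         * (drain-everything) case.
         */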
        if (ctx)
                vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);

        INIT_LIST_HEAD(&del_q);
        spin_lock(&chan->pendq_lock);
        for (num = 0; num < chan->q_nr; num++) {
                list_for_each_entry_safe(entry, tmp, &chan->pendq[num], node) {
                        if ((!ctx) || (entry->ctx->idx == ctx->idx)) {
                                list_move_tail(&entry->node, &del_q);
                        }
                }
        }
        spin_unlock(&chan->pendq_lock);

        /* batch clean up */
        num = 0;
        list_for_each_entry_safe(entry, tmp, &del_q, node) {
                list_del(&entry->node);
                num++;
                if (ctx) {
                        struct vk_msg_blk *msg;
                        int bit_set;
                        bool responded;
                        u32 msg_id;

                        /* for a specific ctx, log any entries that were stuck */
                        msg = entry->to_v_msg;
                        msg_id = get_msg_id(msg);
                        bit_set = test_bit(msg_id, vk->bmap);
                        responded = entry->to_h_msg ? true : false;
                        if (num <= batch_log)
                                dev_info(dev,
                                         "Drained: fid %u size %u msg 0x%x(seq-%x) ctx 0x%x[fd-%d] args:[0x%x 0x%x] resp %s, bmap %d\n",
                                         msg->function_id, msg->size,
                                         msg_id, entry->seq_num,
                                         msg->context_id, entry->ctx->idx,
                                         msg->cmd, msg->arg,
                                         responded ? "T" : "F", bit_set);
                        if (responded)
                                atomic_dec(&ctx->pend_cnt);
                        else if (bit_set)
                                bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
                }
                bcm_vk_free_wkent(dev, entry);
        }
        if (num && ctx)
                dev_info(dev, "Total drained items %d [fd-%d]\n",
                         num, ctx->idx);
}

void bcm_vk_drain_msg_on_reset(struct bcm_vk *vk)
{
        bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
        bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
}

/*
 * Function to sync up the message queue info that is provided by BAR1
 */
int bcm_vk_sync_msgq(struct bcm_vk *vk, bool force_sync)
{
        struct bcm_vk_msgq __iomem *msgq;
        struct device *dev = &vk->pdev->dev;
        u32 msgq_off;
        u32 num_q;
        struct bcm_vk_msg_chan *chan_list[] = {&vk->to_v_msg_chan,
                                               &vk->to_h_msg_chan};
        struct bcm_vk_msg_chan *chan;
        int i, j;
        int ret = 0;

        /*
         * If the driver is loaded at startup, when the VK OS is not up yet,
         * the msgq info may not be available until a later time. In
         * this case, we skip, and the sync function is expected to be
         * called again.
         */
        if (!bcm_vk_msgq_marker_valid(vk)) {
                dev_info(dev, "BAR1 msgq marker not initialized.\n");
                return -EAGAIN;
        }

        msgq_off = vkread32(vk, BAR_1, VK_BAR1_MSGQ_CTRL_OFF);

        /* each side is always half the total */
        num_q = vkread32(vk, BAR_1, VK_BAR1_MSGQ_NR) / 2;
        if (!num_q || (num_q > VK_MSGQ_PER_CHAN_MAX)) {
                dev_err(dev,
                        "Advertised msgq %d error - max %d allowed\n",
                        num_q, VK_MSGQ_PER_CHAN_MAX);
                return -EINVAL;
        }

        vk->to_v_msg_chan.q_nr = num_q;
        vk->to_h_msg_chan.q_nr = num_q;

        /* first msgq location */
        msgq = vk->bar[BAR_1] + msgq_off;

        /*
         * if this function is called when it is already inited,
         * something is wrong
         */
        if (bcm_vk_drv_access_ok(vk) && !force_sync) {
                dev_err(dev, "Msgq info already in sync\n");
                return -EPERM;
        }

        for (i = 0; i < ARRAY_SIZE(chan_list); i++) {
                chan = chan_list[i];
                memset(chan->sync_qinfo, 0, sizeof(chan->sync_qinfo));

                for (j = 0; j < num_q; j++) {
                        struct bcm_vk_sync_qinfo *qinfo;
                        u32 msgq_start;
                        u32 msgq_size;
                        u32 msgq_nxt;
                        u32 msgq_db_offset, q_db_offset;

                        chan->msgq[j] = msgq;
                        msgq_start = readl_relaxed(&msgq->start);
                        msgq_size = readl_relaxed(&msgq->size);
                        msgq_nxt = readl_relaxed(&msgq->nxt);
                        msgq_db_offset = readl_relaxed(&msgq->db_offset);
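                        /*
                         * The doorbell offset register packs the offset in
                         * the low DB_SHIFT bits and its complement in the
                         * upper bits as a validity check; if the check
                         * fails, fall back to the default per-queue offset.
                         */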
                        q_db_offset = (msgq_db_offset & ((1 << DB_SHIFT) - 1));
                        if (q_db_offset == (~msgq_db_offset >> DB_SHIFT))
                                msgq_db_offset = q_db_offset;
                        else
                                /* fall back to default */
                                msgq_db_offset = VK_BAR0_Q_DB_BASE(j);

                        dev_info(dev,
                                 "MsgQ[%d] type %d num %d, @ 0x%x, db_offset 0x%x rd_idx %d wr_idx %d, size %d, nxt 0x%x\n",
                                 j,
                                 readw_relaxed(&msgq->type),
                                 readw_relaxed(&msgq->num),
                                 msgq_start,
                                 msgq_db_offset,
                                 readl_relaxed(&msgq->rd_idx),
                                 readl_relaxed(&msgq->wr_idx),
                                 msgq_size,
                                 msgq_nxt);

                        qinfo = &chan->sync_qinfo[j];
                        /* formulate and record static info */
                        qinfo->q_start = vk->bar[BAR_1] + msgq_start;
                        qinfo->q_size = msgq_size;
                        /* set low threshold as 50% or 1/2 */
                        qinfo->q_low = qinfo->q_size >> 1;
                        qinfo->q_mask = qinfo->q_size - 1;
                        qinfo->q_db_offset = msgq_db_offset;

                        msgq++;
                }
        }
        atomic_set(&vk->msgq_inited, 1);

        return ret;
}

static int bcm_vk_msg_chan_init(struct bcm_vk_msg_chan *chan)
{
        u32 i;

        mutex_init(&chan->msgq_mutex);
        spin_lock_init(&chan->pendq_lock);
        for (i = 0; i < VK_MSGQ_MAX_NR; i++)
                INIT_LIST_HEAD(&chan->pendq[i]);

        return 0;
}

static void bcm_vk_append_pendq(struct bcm_vk_msg_chan *chan, u16 q_num,
                                struct bcm_vk_wkent *entry)
{
        struct bcm_vk_ctx *ctx;

        spin_lock(&chan->pendq_lock);
        list_add_tail(&entry->node, &chan->pendq[q_num]);
        if (entry->to_h_msg) {
                ctx = entry->ctx;
                atomic_inc(&ctx->pend_cnt);
                wake_up_interruptible(&ctx->rd_wq);
        }
        spin_unlock(&chan->pendq_lock);
}

static u32 bcm_vk_append_ib_sgl(struct bcm_vk *vk,
                                struct bcm_vk_wkent *entry,
                                struct _vk_data *data,
                                unsigned int num_planes)
{
        unsigned int i;
        unsigned int item_cnt = 0;
        struct device *dev = &vk->pdev->dev;
        struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
        struct vk_msg_blk *msg = &entry->to_v_msg[0];
        struct bcm_vk_msgq __iomem *msgq;
        struct bcm_vk_sync_qinfo *qinfo;
        u32 ib_sgl_size = 0;
        u8 *buf = (u8 *)&entry->to_v_msg[entry->to_v_blks];
        u32 avail;
        u32 q_num;

        /* check if high watermark is hit, and if so, skip */
        q_num = get_q_num(msg);
        msgq = chan->msgq[q_num];
        qinfo = &chan->sync_qinfo[q_num];
        avail = msgq_avail_space(msgq, qinfo);
        if (avail < qinfo->q_low) {
                dev_dbg(dev, "Skip inserting inband SGL, [0x%x/0x%x]\n",
                        avail, qinfo->q_size);
                return 0;
        }

        for (i = 0; i < num_planes; i++) {
                if (data[i].address &&
                    (ib_sgl_size + data[i].size) <= vk->ib_sgl_size) {
                        item_cnt++;
                        memcpy(buf, entry->dma[i].sglist, data[i].size);
                        ib_sgl_size += data[i].size;
                        buf += data[i].size;
                }
        }

        dev_dbg(dev, "Num %u sgl items appended, size 0x%x, room 0x%x\n",
                item_cnt, ib_sgl_size, vk->ib_sgl_size);

        /* round up to the number of whole message blocks */
        ib_sgl_size = (ib_sgl_size + VK_MSGQ_BLK_SIZE - 1)
                      >> VK_MSGQ_BLK_SZ_SHIFT;

        return ib_sgl_size;
}

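/*
 * Ring the to_v doorbell for queue q_num by writing db_val to the per-queue
 * doorbell register in BAR_0, at the offset recorded during
 * bcm_vk_sync_msgq().
 */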
void bcm_to_v_q_doorbell(struct bcm_vk *vk, u32 q_num, u32 db_val)
{
        struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
        struct bcm_vk_sync_qinfo *qinfo = &chan->sync_qinfo[q_num];

        vkwrite32(vk, db_val, BAR_0, qinfo->q_db_offset);
}

static int bcm_to_v_msg_enqueue(struct bcm_vk *vk, struct bcm_vk_wkent *entry)
{
        static u32 seq_num;
        struct bcm_vk_msg_chan *chan = &vk->to_v_msg_chan;
        struct device *dev = &vk->pdev->dev;
        struct vk_msg_blk *src = &entry->to_v_msg[0];

        struct vk_msg_blk __iomem *dst;
        struct bcm_vk_msgq __iomem *msgq;
        struct bcm_vk_sync_qinfo *qinfo;
        u32 q_num = get_q_num(src);
        u32 wr_idx; /* local copy */
        u32 i;
        u32 avail;
        u32 retry;

        if (entry->to_v_blks != src->size + 1) {
                dev_err(dev, "number of blks %d not matching %d MsgId[0x%x]: func %d ctx 0x%x\n",
                        entry->to_v_blks,
                        src->size + 1,
                        get_msg_id(src),
                        src->function_id,
                        src->context_id);
                return -EMSGSIZE;
        }

        msgq = chan->msgq[q_num];
        qinfo = &chan->sync_qinfo[q_num];

        mutex_lock(&chan->msgq_mutex);

        avail = msgq_avail_space(msgq, qinfo);

        /* if not enough space, return EAGAIN and let the app handle it */
        retry = 0;
        while ((avail < entry->to_v_blks) &&
               (retry++ < BCM_VK_H2VK_ENQ_RETRY)) {
                mutex_unlock(&chan->msgq_mutex);

                msleep(BCM_VK_H2VK_ENQ_RETRY_DELAY_MS);
                mutex_lock(&chan->msgq_mutex);
                avail = msgq_avail_space(msgq, qinfo);
        }
        if (retry > BCM_VK_H2VK_ENQ_RETRY) {
                mutex_unlock(&chan->msgq_mutex);
                return -EAGAIN;
        }

        /* at this point, mutex is taken and there is enough space */
        entry->seq_num = seq_num++; /* update debug seq number */
        wr_idx = readl_relaxed(&msgq->wr_idx);

        if (wr_idx >= qinfo->q_size) {
                dev_crit(dev, "Invalid wr_idx 0x%x => max 0x%x!",
                         wr_idx, qinfo->q_size);
                bcm_vk_blk_drv_access(vk);
                bcm_vk_set_host_alert(vk, ERR_LOG_HOST_PCIE_DWN);
                goto idx_err;
        }

        dst = msgq_blk_addr(qinfo, wr_idx);
        for (i = 0; i < entry->to_v_blks; i++) {
                memcpy_toio(dst, src, sizeof(*dst));

                src++;
                wr_idx = msgq_inc(qinfo, wr_idx, 1);
                dst = msgq_blk_addr(qinfo, wr_idx);
        }

        /* flush the write pointer */
        writel(wr_idx, &msgq->wr_idx);

        /* log new info for debugging */
        dev_dbg(dev,
                "MsgQ[%d] [Rd Wr] = [%d %d] blks inserted %d - Q = [u-%d a-%d]/%d\n",
                readl_relaxed(&msgq->num),
                readl_relaxed(&msgq->rd_idx),
                wr_idx,
                entry->to_v_blks,
                msgq_occupied(msgq, qinfo),
                msgq_avail_space(msgq, qinfo),
                readl_relaxed(&msgq->size));
        /*
         * ring the doorbell based on the queue number. 1 is added to wr_idx
         * so that a value of 0 never appears on the VK side, distinguishing
         * it from the initial value.
         */
        bcm_to_v_q_doorbell(vk, q_num, wr_idx + 1);
idx_err:
        mutex_unlock(&chan->msgq_mutex);
        return 0;
}

int bcm_vk_send_shutdown_msg(struct bcm_vk *vk, u32 shut_type,
                             const pid_t pid, const u32 q_num)
{
        int rc = 0;
        struct bcm_vk_wkent *entry;
        struct device *dev = &vk->pdev->dev;

        /*
         * check if the marker is still good. Sometimes the PCIe interface
         * may have gone down, and if we ship things down based on broken
         * values, the kernel may panic.
         */
        if (!bcm_vk_msgq_marker_valid(vk)) {
                dev_info(dev, "PCIe comm chan - invalid marker (0x%x)!\n",
                         vkread32(vk, BAR_1, VK_BAR1_MSGQ_DEF_RDY));
                return -EINVAL;
        }

        entry = kzalloc(struct_size(entry, to_v_msg, 1), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        /* fill up necessary data */
        entry->to_v_msg[0].function_id = VK_FID_SHUTDOWN;
        set_q_num(&entry->to_v_msg[0], q_num);
        set_msg_id(&entry->to_v_msg[0], VK_SIMPLEX_MSG_ID);
        entry->to_v_blks = 1; /* always 1 block */

        entry->to_v_msg[0].cmd = shut_type;
        entry->to_v_msg[0].arg = pid;

        rc = bcm_to_v_msg_enqueue(vk, entry);
        if (rc)
                dev_err(dev,
                        "Sending shutdown message to q %d for pid %d fails.\n",
                        get_q_num(&entry->to_v_msg[0]), pid);

        kfree(entry);

        return rc;
}

static int bcm_vk_handle_last_sess(struct bcm_vk *vk, const pid_t pid,
                                   const u32 q_num)
{
        int rc = 0;
        struct device *dev = &vk->pdev->dev;

        /*
         * don't send anything down if the message queue is not initialized;
         * if this is the reset session, clear it.
         */
        if (!bcm_vk_drv_access_ok(vk)) {
                if (vk->reset_pid == pid)
                        vk->reset_pid = 0;
                return -EPERM;
        }

        dev_dbg(dev, "No more sessions, shut down pid %d\n", pid);

        /* only need to do it if it is not the reset process */
        if (vk->reset_pid != pid)
                rc = bcm_vk_send_shutdown_msg(vk, VK_SHUTDOWN_PID, pid, q_num);
        else
                /* put reset_pid to 0 if it is exiting last session */
                vk->reset_pid = 0;

        return rc;
}

static struct bcm_vk_wkent *bcm_vk_dequeue_pending(struct bcm_vk *vk,
                                                   struct bcm_vk_msg_chan *chan,
                                                   u16 q_num,
                                                   u16 msg_id)
{
        bool found = false;
        struct bcm_vk_wkent *entry;

        spin_lock(&chan->pendq_lock);
        list_for_each_entry(entry, &chan->pendq[q_num], node) {
                if (get_msg_id(&entry->to_v_msg[0]) == msg_id) {
                        list_del(&entry->node);
                        found = true;
                        bcm_vk_msgid_bitmap_clear(vk, msg_id, 1);
                        break;
                }
        }
        spin_unlock(&chan->pendq_lock);
        return ((found) ? entry : NULL);
}

s32 bcm_to_h_msg_dequeue(struct bcm_vk *vk)
{
        struct device *dev = &vk->pdev->dev;
        struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
        struct vk_msg_blk *data;
        struct vk_msg_blk __iomem *src;
        struct vk_msg_blk *dst;
        struct bcm_vk_msgq __iomem *msgq;
        struct bcm_vk_sync_qinfo *qinfo;
        struct bcm_vk_wkent *entry;
        u32 rd_idx, wr_idx;
        u32 q_num, msg_id, j;
        u32 num_blks;
        s32 total = 0;
        int cnt = 0;
        int msg_processed = 0;
        int max_msg_to_process;
        bool exit_loop;

        /*
         * drain all the messages from the queues, find each message's
         * pending entry in the to_v queue based on msg_id & q_num, and move
         * the entry to the to_h pending queue, waiting for the user space
         * program to extract it
         */
        mutex_lock(&chan->msgq_mutex);

        for (q_num = 0; q_num < chan->q_nr; q_num++) {
                msgq = chan->msgq[q_num];
                qinfo = &chan->sync_qinfo[q_num];
                max_msg_to_process = BCM_VK_MSG_PROC_MAX_LOOP * qinfo->q_size;

                rd_idx = readl_relaxed(&msgq->rd_idx);
                wr_idx = readl_relaxed(&msgq->wr_idx);
                msg_processed = 0;
                exit_loop = false;
                while ((rd_idx != wr_idx) && !exit_loop) {
                        u8 src_size;

                        /*
                         * Make a local copy and get pointer to src blk
                         * The rd_idx is masked before getting the pointer to
                         * avoid out of bound access in case the interface goes
                         * down. It will end up pointing to the last block in
                         * the buffer, but subsequent src->size check would be
                         * able to catch this.
                         */
                        src = msgq_blk_addr(qinfo, rd_idx & qinfo->q_mask);
                        src_size = readb(&src->size);

                        if ((rd_idx >= qinfo->q_size) ||
                            (src_size > (qinfo->q_size - 1))) {
                                dev_crit(dev,
                                         "Invalid rd_idx 0x%x or size 0x%x => max 0x%x!",
                                         rd_idx, src_size, qinfo->q_size);
                                bcm_vk_blk_drv_access(vk);
                                bcm_vk_set_host_alert(vk,
                                                      ERR_LOG_HOST_PCIE_DWN);
                                goto idx_err;
                        }

                        num_blks = src_size + 1;
                        data = kzalloc(num_blks * VK_MSGQ_BLK_SIZE, GFP_KERNEL);
                        if (data) {
                                /* copy messages and linearize it */
                                dst = data;
                                for (j = 0; j < num_blks; j++) {
                                        memcpy_fromio(dst, src, sizeof(*dst));

                                        dst++;
                                        rd_idx = msgq_inc(qinfo, rd_idx, 1);
                                        src = msgq_blk_addr(qinfo, rd_idx);
                                }
                                total++;
                        } else {
                                /*
                                 * if we could not allocate memory in kernel,
                                 * that is fatal.
                                 */
                                dev_crit(dev, "Kernel mem allocation failure.\n");
                                total = -ENOMEM;
                                goto idx_err;
                        }

                        /* flush rd pointer after a message is dequeued */
                        writel(rd_idx, &msgq->rd_idx);

                        /* log new info for debugging */
                        dev_dbg(dev,
                                "MsgQ[%d] [Rd Wr] = [%d %d] blks extracted %d - Q = [u-%d a-%d]/%d\n",
                                readl_relaxed(&msgq->num),
                                rd_idx,
                                wr_idx,
                                num_blks,
                                msgq_occupied(msgq, qinfo),
                                msgq_avail_space(msgq, qinfo),
                                readl_relaxed(&msgq->size));

                        /*
                         * No need to search if it is an autonomous one-way
                         * message from the driver, as these messages do not
                         * bear a to_v pending item. Currently, only the
                         * shutdown message falls into this category.
                         */
                        if (data->function_id == VK_FID_SHUTDOWN) {
                                kfree(data);
                                continue;
                        }

                        msg_id = get_msg_id(data);
                        /* lookup original message in to_v direction */
                        entry = bcm_vk_dequeue_pending(vk,
                                                       &vk->to_v_msg_chan,
                                                       q_num,
                                                       msg_id);

                        /*
                         * if a response arrives that does not have a matching
                         * prior send, this is where handling for it would be
                         * added
                         */
                        if (entry) {
                                entry->to_h_blks = num_blks;
                                entry->to_h_msg = data;
                                bcm_vk_append_pendq(&vk->to_h_msg_chan,
                                                    q_num, entry);

                        } else {
                                if (cnt++ < batch_log)
                                        dev_info(dev,
                                                 "Could not find MsgId[0x%x] for resp func %d bmap %d\n",
                                                 msg_id, data->function_id,
                                                 test_bit(msg_id, vk->bmap));
                                kfree(data);
                        }
                        /* Fetch wr_idx to handle more back-to-back events */
                        wr_idx = readl(&msgq->wr_idx);

                        /*
                         * cap the number processed so that, even when trying
                         * to handle more back-to-back events, we neither hold
                         * the CPU too long nor loop forever if the rd/wr
                         * indexes are corrupted.
                         */
                        if (++msg_processed >= max_msg_to_process) {
                                dev_warn(dev, "Q[%d] Per loop processing exceeds %d\n",
                                         q_num, max_msg_to_process);
                                exit_loop = true;
                        }
                }
        }
idx_err:
        mutex_unlock(&chan->msgq_mutex);
        dev_dbg(dev, "total %d drained from queues\n", total);

        return total;
}

/*
 * init routine for all required data structures
 */
static int bcm_vk_data_init(struct bcm_vk *vk)
{
        int i;

        spin_lock_init(&vk->ctx_lock);
        for (i = 0; i < ARRAY_SIZE(vk->ctx); i++) {
                vk->ctx[i].in_use = false;
                vk->ctx[i].idx = i; /* self identity */
                vk->ctx[i].miscdev = NULL;
        }
        spin_lock_init(&vk->msg_id_lock);
        spin_lock_init(&vk->host_alert_lock);
        vk->msg_id = 0;

        /* initialize hash table */
        for (i = 0; i < VK_PID_HT_SZ; i++)
                INIT_LIST_HEAD(&vk->pid_ht[i].head);

        return 0;
}

irqreturn_t bcm_vk_msgq_irqhandler(int irq, void *dev_id)
{
        struct bcm_vk *vk = dev_id;

        if (!bcm_vk_drv_access_ok(vk)) {
                dev_err(&vk->pdev->dev,
                        "Interrupt %d received when msgq not inited\n", irq);
                goto skip_schedule_work;
        }

        queue_work(vk->wq_thread, &vk->wq_work);

skip_schedule_work:
        return IRQ_HANDLED;
}

int bcm_vk_open(struct inode *inode, struct file *p_file)
{
        struct bcm_vk_ctx *ctx;
        struct miscdevice *miscdev = (struct miscdevice *)p_file->private_data;
        struct bcm_vk *vk = container_of(miscdev, struct bcm_vk, miscdev);
        struct device *dev = &vk->pdev->dev;
        int rc = 0;

        /* get a context and set it up for file */
        ctx = bcm_vk_get_ctx(vk, task_tgid_nr(current));
        if (!ctx) {
                dev_err(dev, "Error allocating context\n");
                rc = -ENOMEM;
        } else {
                /*
                 * set up the context and replace private_data with the
                 * context for other methods to use. The context is needed
                 * because multiple sessions are allowed to open the device,
                 * and for each file open, when the upper layer queries for
                 * a response, only those tied to that specific open should
                 * be returned. The context->idx is used for such binding.
                 */
                ctx->miscdev = miscdev;
                p_file->private_data = ctx;
                dev_dbg(dev, "ctx_returned with idx %d, pid %d\n",
                        ctx->idx, ctx->pid);
        }
        return rc;
}

ssize_t bcm_vk_read(struct file *p_file,
                    char __user *buf,
                    size_t count,
                    loff_t *f_pos)
{
        ssize_t rc = -ENOMSG;
        struct bcm_vk_ctx *ctx = p_file->private_data;
        struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
                                         miscdev);
        struct device *dev = &vk->pdev->dev;
        struct bcm_vk_msg_chan *chan = &vk->to_h_msg_chan;
        struct bcm_vk_wkent *entry = NULL;
        u32 q_num;
        u32 rsp_length;
        bool found = false;

        if (!bcm_vk_drv_access_ok(vk))
                return -EPERM;

        dev_dbg(dev, "Buf count %zu\n", count);
        found = false;

        /*
         * search through the pendq on the to_h chan, and return only those
         * that belong to the same context. The search always goes from the
         * high to the low priority queues
         */
        spin_lock(&chan->pendq_lock);
        for (q_num = 0; q_num < chan->q_nr; q_num++) {
                list_for_each_entry(entry, &chan->pendq[q_num], node) {
                        if (entry->ctx->idx == ctx->idx) {
                                if (count >=
                                    (entry->to_h_blks * VK_MSGQ_BLK_SIZE)) {
                                        list_del(&entry->node);
                                        atomic_dec(&ctx->pend_cnt);
                                        found = true;
                                } else {
                                        /* buffer not big enough */
                                        rc = -EMSGSIZE;
                                }
                                goto read_loop_exit;
                        }
                }
        }
read_loop_exit:
        spin_unlock(&chan->pendq_lock);

        if (found) {
                /* retrieve the passed down msg_id */
                set_msg_id(&entry->to_h_msg[0], entry->usr_msg_id);
                rsp_length = entry->to_h_blks * VK_MSGQ_BLK_SIZE;
                if (copy_to_user(buf, entry->to_h_msg, rsp_length) == 0)
                        rc = rsp_length;

                bcm_vk_free_wkent(dev, entry);
        } else if (rc == -EMSGSIZE) {
                struct vk_msg_blk tmp_msg = entry->to_h_msg[0];

                /*
                 * in this case, return just the first block, so
                 * that the app knows what size it should be asking for.
                 */
                set_msg_id(&tmp_msg, entry->usr_msg_id);
                tmp_msg.size = entry->to_h_blks - 1;
                if (copy_to_user(buf, &tmp_msg, VK_MSGQ_BLK_SIZE) != 0) {
                        dev_err(dev, "Error return 1st block in -EMSGSIZE\n");
                        rc = -EFAULT;
                }
        }
        return rc;
}

ssize_t bcm_vk_write(struct file *p_file,
                     const char __user *buf,
                     size_t count,
                     loff_t *f_pos)
{
        ssize_t rc;
        struct bcm_vk_ctx *ctx = p_file->private_data;
        struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk,
                                         miscdev);
        struct bcm_vk_msgq __iomem *msgq;
        struct device *dev = &vk->pdev->dev;
        struct bcm_vk_wkent *entry;
        u32 sgl_extra_blks;
        u32 q_num;
        u32 msg_size;
        u32 msgq_size;

        if (!bcm_vk_drv_access_ok(vk))
                return -EPERM;

        dev_dbg(dev, "Msg count %zu\n", count);

        /* first, sanity check that count is a multiple of the basic blk size */
        if (count & (VK_MSGQ_BLK_SIZE - 1)) {
                dev_err(dev, "Failure with size %zu not multiple of %zu\n",
                        count, VK_MSGQ_BLK_SIZE);
                rc = -EINVAL;
                goto write_err;
        }

        /* allocate the work entry + buffer for size count and inband sgl */
        entry = kzalloc(sizeof(*entry) + count + vk->ib_sgl_size,
                        GFP_KERNEL);
        if (!entry) {
                rc = -ENOMEM;
                goto write_err;
        }

        /* now copy msg from user space, and then formulate the work entry */
        if (copy_from_user(&entry->to_v_msg[0], buf, count)) {
                rc = -EFAULT;
                goto write_free_ent;
        }

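        /*
         * count has been verified above to be a multiple of VK_MSGQ_BLK_SIZE,
         * so this shift converts the byte count to whole message blocks.
         */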
        entry->to_v_blks = count >> VK_MSGQ_BLK_SZ_SHIFT;
        entry->ctx = ctx;

        /* check that the blk size does not exceed the queue space */
        q_num = get_q_num(&entry->to_v_msg[0]);
        msgq = vk->to_v_msg_chan.msgq[q_num];
        msgq_size = readl_relaxed(&msgq->size);
        if (entry->to_v_blks + (vk->ib_sgl_size >> VK_MSGQ_BLK_SZ_SHIFT)
            > (msgq_size - 1)) {
                dev_err(dev, "Blk size %d exceed max queue size allowed %d\n",
                        entry->to_v_blks, msgq_size - 1);
                rc = -EINVAL;
                goto write_free_ent;
        }

        /* Use internal message id */
        entry->usr_msg_id = get_msg_id(&entry->to_v_msg[0]);
        rc = bcm_vk_get_msg_id(vk);
        if (rc == VK_MSG_ID_OVERFLOW) {
                dev_err(dev, "msg_id overflow\n");
                rc = -EOVERFLOW;
                goto write_free_ent;
        }
        set_msg_id(&entry->to_v_msg[0], rc);
        ctx->q_num = q_num;

        dev_dbg(dev,
                "[Q-%d]Message ctx id %d, usr_msg_id 0x%x sent msg_id 0x%x\n",
                ctx->q_num, ctx->idx, entry->usr_msg_id,
                get_msg_id(&entry->to_v_msg[0]));

        if (entry->to_v_msg[0].function_id == VK_FID_TRANS_BUF) {
                /* Convert any pointers to sg list */
                unsigned int num_planes;
                int dir;
                struct _vk_data *data;

                /*
                 * check if we are in reset; if so, no buffer transfer is
                 * allowed, so return an error.
                 */
                if (vk->reset_pid) {
                        dev_dbg(dev, "No Transfer allowed during reset, pid %d.\n",
                                ctx->pid);
                        rc = -EACCES;
                        goto write_free_msgid;
                }

                num_planes = entry->to_v_msg[0].cmd & VK_CMD_PLANES_MASK;
                if ((entry->to_v_msg[0].cmd & VK_CMD_MASK) == VK_CMD_DOWNLOAD)
                        dir = DMA_FROM_DEVICE;
                else
                        dir = DMA_TO_DEVICE;

                /* Calculate vk_data location */
                /* Go to end of the message */
                msg_size = entry->to_v_msg[0].size;
                if (msg_size > entry->to_v_blks) {
                        rc = -EMSGSIZE;
                        goto write_free_msgid;
                }

                data = (struct _vk_data *)&entry->to_v_msg[msg_size + 1];

                /* Now back up to the start of the pointers */
                data -= num_planes;

                /* Convert user addresses to DMA SG List */
                rc = bcm_vk_sg_alloc(dev, entry->dma, dir, data, num_planes);
                if (rc)
                        goto write_free_msgid;

                atomic_inc(&ctx->dma_cnt);
                /* try to embed inband sgl */
                sgl_extra_blks = bcm_vk_append_ib_sgl(vk, entry, data,
                                                      num_planes);
                entry->to_v_blks += sgl_extra_blks;
                entry->to_v_msg[0].size += sgl_extra_blks;
        } else if (entry->to_v_msg[0].function_id == VK_FID_INIT &&
                   entry->to_v_msg[0].context_id == VK_NEW_CTX) {
                /*
                 * Init happens in 2 stages, only the first stage contains the
                 * pid that needs translating.
                 */
                pid_t org_pid, pid;

                /*
                 * translate the pid into the unique host space as the user
                 * may run sessions inside containers or process
                 * namespaces.
                 */
#define VK_MSG_PID_MASK 0xffffff00
#define VK_MSG_PID_SH 8
                org_pid = (entry->to_v_msg[0].arg & VK_MSG_PID_MASK)
                          >> VK_MSG_PID_SH;

                pid = task_tgid_nr(current);
                entry->to_v_msg[0].arg =
                        (entry->to_v_msg[0].arg & ~VK_MSG_PID_MASK) |
                        (pid << VK_MSG_PID_SH);
                if (org_pid != pid)
                        dev_dbg(dev, "In PID 0x%x(%d), converted PID 0x%x(%d)\n",
                                org_pid, org_pid, pid, pid);
        }

        /*
         * store work entry to pending queue until a response is received.
         * This needs to be done before enqueuing the message
         */
        bcm_vk_append_pendq(&vk->to_v_msg_chan, q_num, entry);

        rc = bcm_to_v_msg_enqueue(vk, entry);
        if (rc) {
                dev_err(dev, "Fail to enqueue msg to to_v queue\n");

                /* remove message from pending list */
                entry = bcm_vk_dequeue_pending
                               (vk,
                                &vk->to_v_msg_chan,
                                q_num,
                                get_msg_id(&entry->to_v_msg[0]));
                goto write_free_ent;
        }

        return count;

write_free_msgid:
        bcm_vk_msgid_bitmap_clear(vk, get_msg_id(&entry->to_v_msg[0]), 1);
write_free_ent:
        kfree(entry);
write_err:
        return rc;
}

__poll_t bcm_vk_poll(struct file *p_file, struct poll_table_struct *wait)
{
        __poll_t ret = 0;
        int cnt;
        struct bcm_vk_ctx *ctx = p_file->private_data;
        struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
        struct device *dev = &vk->pdev->dev;

        poll_wait(p_file, &ctx->rd_wq, wait);

        cnt = atomic_read(&ctx->pend_cnt);
        if (cnt) {
                ret = (__force __poll_t)(POLLIN | POLLRDNORM);
                if (cnt < 0) {
                        dev_err(dev, "Error cnt %d, setting back to 0", cnt);
                        atomic_set(&ctx->pend_cnt, 0);
                }
        }

        return ret;
}

int bcm_vk_release(struct inode *inode, struct file *p_file)
{
        int ret;
        struct bcm_vk_ctx *ctx = p_file->private_data;
        struct bcm_vk *vk = container_of(ctx->miscdev, struct bcm_vk, miscdev);
        struct device *dev = &vk->pdev->dev;
        pid_t pid = ctx->pid;
        int dma_cnt;
        unsigned long timeout, start_time;

        /*
         * if there are outstanding DMA transactions, we need to delay long
         * enough to ensure that the card side has stopped touching the host
         * buffer and its SGL list. A race condition could happen if the host
         * app is killed abruptly, e.g. kill -9, while some DMA transfers are
         * still in flight. Nothing can be done except delay, as the host side
         * is running in a completely async fashion.
         */
        start_time = jiffies;
        timeout = start_time + msecs_to_jiffies(BCM_VK_DMA_DRAIN_MAX_MS);
        do {
                if (time_after(jiffies, timeout)) {
                        dev_warn(dev, "%d dma still pending for [fd-%d] pid %d\n",
                                 dma_cnt, ctx->idx, pid);
                        break;
                }
                dma_cnt = atomic_read(&ctx->dma_cnt);
                cpu_relax();
                cond_resched();
        } while (dma_cnt);
        dev_dbg(dev, "Draining for [fd-%d] pid %d - delay %d ms\n",
                ctx->idx, pid, jiffies_to_msecs(jiffies - start_time));

        bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, ctx);
        bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, ctx);

        ret = bcm_vk_free_ctx(vk, ctx);
        if (ret == 0)
                ret = bcm_vk_handle_last_sess(vk, pid, ctx->q_num);
        else
                ret = 0;

        kref_put(&vk->kref, bcm_vk_release_data);

        return ret;
}

int bcm_vk_msg_init(struct bcm_vk *vk)
{
        struct device *dev = &vk->pdev->dev;
        int ret;

        if (bcm_vk_data_init(vk)) {
                dev_err(dev, "Error initializing internal data structures\n");
                return -EINVAL;
        }

        if (bcm_vk_msg_chan_init(&vk->to_v_msg_chan) ||
            bcm_vk_msg_chan_init(&vk->to_h_msg_chan)) {
                dev_err(dev, "Error initializing communication channel\n");
                return -EIO;
        }

        /* read msgq info if ready */
        ret = bcm_vk_sync_msgq(vk, false);
        if (ret && (ret != -EAGAIN)) {
                dev_err(dev, "Error reading comm msg Q info\n");
                return -EIO;
        }

        return 0;
}

void bcm_vk_msg_remove(struct bcm_vk *vk)
{
        bcm_vk_blk_drv_access(vk);

        /* drain all pending items */
        bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_v_msg_chan, NULL);
        bcm_vk_drain_all_pend(&vk->pdev->dev, &vk->to_h_msg_chan, NULL);
}