// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Filename: cregs.c
 *
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *          Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/completion.h>
#include <linux/slab.h>

#include "rsxx_priv.h"

#define CREG_TIMEOUT_MSEC	10000

typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
                            struct creg_cmd *cmd,
                            int st);

struct creg_cmd {
        struct list_head list;
        creg_cmd_cb cb;
        void *cb_private;
        unsigned int op;
        unsigned int addr;
        int cnt8;
        void *buf;
        unsigned int stream;
        unsigned int status;
};

static struct kmem_cache *creg_cmd_pool;


/*------------ Private Functions --------------*/

#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianness! Aborting...
#endif

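/*
 * Copy a command's data buffer into the hardware's CREG data registers,
 * one 32-bit word at a time. Byte streams are written big-endian on
 * little-endian hosts to match the firmware's byte ordering.
 */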
static int copy_to_creg_data(struct rsxx_cardinfo *card,
                             int cnt8,
                             void *buf,
                             unsigned int stream)
{
        int i = 0;
        u32 *data = buf;

        if (unlikely(card->eeh_state))
                return -EIO;

        for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
                /*
                 * Firmware implementation makes it necessary to byte swap on
                 * little endian processors.
                 */
                if (LITTLE_ENDIAN && stream)
                        iowrite32be(data[i], card->regmap + CREG_DATA(i));
                else
                        iowrite32(data[i], card->regmap + CREG_DATA(i));
        }

        return 0;
}

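/*
 * Copy a completed command's result out of the hardware's CREG data
 * registers into the caller's buffer, applying the same byte-swapping
 * rule as copy_to_creg_data().
 */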
static int copy_from_creg_data(struct rsxx_cardinfo *card,
                               int cnt8,
                               void *buf,
                               unsigned int stream)
{
        int i = 0;
        u32 *data = buf;

        if (unlikely(card->eeh_state))
                return -EIO;

        for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
                /*
                 * Firmware implementation makes it necessary to byte swap on
                 * little endian processors.
                 */
                if (LITTLE_ENDIAN && stream)
                        data[i] = ioread32be(card->regmap + CREG_DATA(i));
                else
                        data[i] = ioread32(card->regmap + CREG_DATA(i));
        }

        return 0;
}

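/*
 * Program the address, count, and (for writes) data registers, then
 * write the command register. Writing the command register sets the
 * valid bit, which starts the command in hardware.
 */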
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
        int st;

        if (unlikely(card->eeh_state))
                return;

        iowrite32(cmd->addr, card->regmap + CREG_ADD);
        iowrite32(cmd->cnt8, card->regmap + CREG_CNT);

        if (cmd->op == CREG_OP_WRITE) {
                if (cmd->buf) {
                        st = copy_to_creg_data(card, cmd->cnt8,
                                               cmd->buf, cmd->stream);
                        if (st)
                                return;
                }
        }

        if (unlikely(card->eeh_state))
                return;

        /* Setting the valid bit will kick off the command. */
        iowrite32(cmd->op, card->regmap + CREG_CMD);
}

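/*
 * Pop the next queued command, arm the timeout timer, and issue the
 * command to hardware. Does nothing if a command is already active or
 * the queue is empty. The caller must hold creg_ctrl.lock.
 */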
static void creg_kick_queue(struct rsxx_cardinfo *card)
{
        if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
                return;

        card->creg_ctrl.active = 1;
        card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
                                                      struct creg_cmd, list);
        list_del(&card->creg_ctrl.active_cmd->list);
        card->creg_ctrl.q_depth--;

        /*
         * We have to set the timer before we push the new command. Otherwise,
         * we could create a race condition that would occur if the timer
         * was not canceled, and expired after the new command was pushed,
         * but before the command was issued to hardware.
         */
        mod_timer(&card->creg_ctrl.cmd_timer,
                  jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

        creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}

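/*
 * Allocate a command, append it to the queue, and kick the queue so it
 * is issued immediately if the interface is idle. The callback runs
 * from the completion path once the command finishes or fails.
 */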
static int creg_queue_cmd(struct rsxx_cardinfo *card,
                          unsigned int op,
                          unsigned int addr,
                          unsigned int cnt8,
                          void *buf,
                          int stream,
                          creg_cmd_cb callback,
                          void *cb_private)
{
        struct creg_cmd *cmd;

        /* Don't queue stuff up if we're halted. */
        if (unlikely(card->halt))
                return -EINVAL;

        if (card->creg_ctrl.reset)
                return -EAGAIN;

        if (cnt8 > MAX_CREG_DATA8)
                return -EINVAL;

        cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        INIT_LIST_HEAD(&cmd->list);

        cmd->op = op;
        cmd->addr = addr;
        cmd->cnt8 = cnt8;
        cmd->buf = buf;
        cmd->stream = stream;
        cmd->cb = callback;
        cmd->cb_private = cb_private;
        cmd->status = 0;

        spin_lock_bh(&card->creg_ctrl.lock);
        list_add_tail(&cmd->list, &card->creg_ctrl.queue);
        card->creg_ctrl.q_depth++;
        creg_kick_queue(card);
        spin_unlock_bh(&card->creg_ctrl.lock);

        return 0;
}

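/*
 * Timeout handler: the active command did not complete within
 * CREG_TIMEOUT_MSEC. Pop it, report -ETIMEDOUT to its callback, and
 * restart the queue.
 */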
static void creg_cmd_timed_out(struct timer_list *t)
{
        struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
        struct creg_cmd *cmd;

        spin_lock(&card->creg_ctrl.lock);
        cmd = card->creg_ctrl.active_cmd;
        card->creg_ctrl.active_cmd = NULL;
        spin_unlock(&card->creg_ctrl.lock);

        if (cmd == NULL) {
                card->creg_ctrl.creg_stats.creg_timeout++;
                dev_warn(CARD_TO_DEV(card),
                         "No active command associated with timeout!\n");
                return;
        }

        if (cmd->cb)
                cmd->cb(card, cmd, -ETIMEDOUT);

        kmem_cache_free(creg_cmd_pool, cmd);

        spin_lock(&card->creg_ctrl.lock);
        card->creg_ctrl.active = 0;
        creg_kick_queue(card);
        spin_unlock(&card->creg_ctrl.lock);
}

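/*
 * Completion work: read back the hardware status, copy out read data,
 * invoke the command's callback, and start the next queued command.
 */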
static void creg_cmd_done(struct work_struct *work)
{
        struct rsxx_cardinfo *card;
        struct creg_cmd *cmd;
        int st = 0;

        card = container_of(work, struct rsxx_cardinfo,
                            creg_ctrl.done_work);

        /*
         * If the timer could not be cancelled (it may have already fired),
         * race with the timeout handler to pop the active command.
         */
        if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
                card->creg_ctrl.creg_stats.failed_cancel_timer++;

        spin_lock_bh(&card->creg_ctrl.lock);
        cmd = card->creg_ctrl.active_cmd;
        card->creg_ctrl.active_cmd = NULL;
        spin_unlock_bh(&card->creg_ctrl.lock);

        if (cmd == NULL) {
                dev_err(CARD_TO_DEV(card),
                        "Spurious creg interrupt!\n");
                return;
        }

        card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
        cmd->status = card->creg_ctrl.creg_stats.stat;
        if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
                dev_err(CARD_TO_DEV(card),
                        "Invalid status on creg command\n");
                /*
                 * At this point we're probably reading garbage from HW. Don't
                 * do anything else that could mess up the system and let
                 * the sync function return an error.
                 */
                st = -EIO;
                goto creg_done;
        } else if (cmd->status & CREG_STAT_ERROR) {
                st = -EIO;
        }

        if (cmd->op == CREG_OP_READ) {
                unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);

                /* Paranoid Sanity Checks */
                if (!cmd->buf) {
                        dev_err(CARD_TO_DEV(card),
                                "Buffer not given for read.\n");
                        st = -EIO;
                        goto creg_done;
                }
                if (cnt8 != cmd->cnt8) {
                        dev_err(CARD_TO_DEV(card),
                                "count mismatch\n");
                        st = -EIO;
                        goto creg_done;
                }

                st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
        }

creg_done:
        if (cmd->cb)
                cmd->cb(card, cmd, st);

        kmem_cache_free(creg_cmd_pool, cmd);

        spin_lock_bh(&card->creg_ctrl.lock);
        card->creg_ctrl.active = 0;
        creg_kick_queue(card);
        spin_unlock_bh(&card->creg_ctrl.lock);
}

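/*
 * Reset the creg interface after a fatal error: cancel everything that
 * is queued or active with -ECANCELED, with creg interrupts disabled
 * for the duration.
 */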
static void creg_reset(struct rsxx_cardinfo *card)
{
        struct creg_cmd *cmd = NULL;
        struct creg_cmd *tmp;
        unsigned long flags;

        /*
         * mutex_trylock is used here because if reset_lock is taken then a
         * reset is already happening. So, we can just go ahead and return.
         */
        if (!mutex_trylock(&card->creg_ctrl.reset_lock))
                return;

        card->creg_ctrl.reset = 1;
        spin_lock_irqsave(&card->irq_lock, flags);
        rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
        spin_unlock_irqrestore(&card->irq_lock, flags);

        dev_warn(CARD_TO_DEV(card),
                 "Resetting creg interface for recovery\n");

        /* Cancel outstanding commands */
        spin_lock_bh(&card->creg_ctrl.lock);
        list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
                list_del(&cmd->list);
                card->creg_ctrl.q_depth--;
                if (cmd->cb)
                        cmd->cb(card, cmd, -ECANCELED);
                kmem_cache_free(creg_cmd_pool, cmd);
        }

        cmd = card->creg_ctrl.active_cmd;
        card->creg_ctrl.active_cmd = NULL;
        if (cmd) {
                if (timer_pending(&card->creg_ctrl.cmd_timer))
                        del_timer_sync(&card->creg_ctrl.cmd_timer);

                if (cmd->cb)
                        cmd->cb(card, cmd, -ECANCELED);
                kmem_cache_free(creg_cmd_pool, cmd);

                card->creg_ctrl.active = 0;
        }
        spin_unlock_bh(&card->creg_ctrl.lock);

        card->creg_ctrl.reset = 0;
        spin_lock_irqsave(&card->irq_lock, flags);
        rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
        spin_unlock_irqrestore(&card->irq_lock, flags);

        mutex_unlock(&card->creg_ctrl.reset_lock);
}

/* Used for synchronous accesses */
struct creg_completion {
        struct completion *cmd_done;
        int st;
        u32 creg_status;
};

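/*
 * Callback used by the synchronous path: record the result and wake
 * the waiter in __issue_creg_rw().
 */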
static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
                             struct creg_cmd *cmd,
                             int st)
{
        struct creg_completion *cmd_completion;

        cmd_completion = cmd->cb_private;
        BUG_ON(!cmd_completion);

        cmd_completion->st = st;
        cmd_completion->creg_status = cmd->status;
        complete(cmd_completion->cmd_done);
}

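/*
 * Queue a single creg command and block until it completes, times out,
 * or is cancelled. Returns 0 on success and stores the raw hardware
 * status in *hw_stat.
 */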
static int __issue_creg_rw(struct rsxx_cardinfo *card,
                           unsigned int op,
                           unsigned int addr,
                           unsigned int cnt8,
                           void *buf,
                           int stream,
                           unsigned int *hw_stat)
{
        DECLARE_COMPLETION_ONSTACK(cmd_done);
        struct creg_completion completion;
        unsigned long timeout;
        int st;

        completion.cmd_done = &cmd_done;
        completion.st = 0;
        completion.creg_status = 0;

        st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
                            &completion);
        if (st)
                return st;

        /*
         * This timeout is necessary for unresponsive hardware. The additional
         * 20 seconds is there to guarantee that each queued creg request has
         * time to complete.
         */
        timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
                                   card->creg_ctrl.q_depth + 20000);

        /*
         * The creg interface is guaranteed to complete. It has a timeout
         * mechanism that will kick in if hardware does not respond.
         */
        st = wait_for_completion_timeout(completion.cmd_done, timeout);
        if (st == 0) {
                /*
                 * This is really bad, because the kernel timer did not
                 * expire and notify us of a timeout!
                 */
                dev_crit(CARD_TO_DEV(card),
                         "cregs timer failed\n");
                creg_reset(card);
                return -EIO;
        }

        *hw_stat = completion.creg_status;

        if (completion.st) {
                /*
                 * This dummy read is needed to detect any severe error that
                 * may have occurred, e.g. an EEH error. iowrite32 will not
                 * detect EEH errors, so we must recover here if such an
                 * error is the reason for the failure.
                 */
                ioread32(card->regmap + SCRATCH);

                dev_warn(CARD_TO_DEV(card),
                         "creg command failed(%d x%08x)\n",
                         completion.st, addr);
                return completion.st;
        }

        return 0;
}

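/*
 * Split an arbitrarily sized access into MAX_CREG_DATA8-sized chunks
 * and issue each chunk synchronously.
 */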
static int issue_creg_rw(struct rsxx_cardinfo *card,
                         u32 addr,
                         unsigned int size8,
                         void *data,
                         int stream,
                         int read)
{
        unsigned int hw_stat;
        unsigned int xfer;
        unsigned int op;
        int st;

        op = read ? CREG_OP_READ : CREG_OP_WRITE;

        do {
                xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);

                st = __issue_creg_rw(card, op, addr, xfer,
                                     data, stream, &hw_stat);
                if (st)
                        return st;

                data = (char *)data + xfer;
                addr += xfer;
                size8 -= xfer;
        } while (size8);

        return 0;
}

/* ---------------------------- Public API ---------------------------------- */
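/*
 * rsxx_creg_write() and rsxx_creg_read() block until the access
 * completes. For example, reading a single 32-bit register:
 *
 *	u32 state;
 *	int st = rsxx_creg_read(card, CREG_ADD_CARD_STATE,
 *				sizeof(state), &state, 0);
 */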
int rsxx_creg_write(struct rsxx_cardinfo *card,
                    u32 addr,
                    unsigned int size8,
                    void *data,
                    int byte_stream)
{
        return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}

int rsxx_creg_read(struct rsxx_cardinfo *card,
                   u32 addr,
                   unsigned int size8,
                   void *data,
                   int byte_stream)
{
        return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}

int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
        return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
                              sizeof(*state), state, 0);
}

int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
        unsigned int size;
        int st;

        st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
                            sizeof(size), &size, 0);
        if (st)
                return st;

        *size8 = (u64)size * RSXX_HW_BLK_SIZE;
        return 0;
}

int rsxx_get_num_targets(struct rsxx_cardinfo *card,
                         unsigned int *n_targets)
{
        return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
                              sizeof(*n_targets), n_targets, 0);
}

int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
                               u32 *capabilities)
{
        return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
                              sizeof(*capabilities), capabilities, 0);
}

int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
        return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
                               sizeof(cmd), &cmd, 0);
}


/*----------------- HW Log Functions -------------------*/
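/*
 * Emit one hardware log message at the severity encoded in its "<#>"
 * prefix, falling back to dev_info() for unknown levels.
 */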
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
        static char level;

        /*
         * New messages start with "<#>", where # is the log level. Messages
         * that extend past the log buffer will use the previous level.
         */
        if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
                level = str[1];
                str += 3; /* Skip past the log level. */
                len -= 3;
        }

        switch (level) {
        case '0':
                dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '1':
                dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '2':
                dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '3':
                dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '4':
                dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '5':
                dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '6':
                dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        case '7':
                dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        default:
                dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
                break;
        }
}

/*
 * Copy at most @count bytes from @src into @dest, stopping once a
 * terminating '\0' has been copied. Returns the number of bytes
 * copied to @dest.
 */
static int substrncpy(char *dest, const char *src, int count)
{
        int max_cnt = count;

        while (count) {
                count--;
                *dest = *src;
                if (*dest == '\0')
                        break;
                src++;
                dest++;
        }
        return max_cnt - count;
}

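/*
 * Completion callback for a hardware log read: stage the text in the
 * card's log buffer, flush each completed message (or a full buffer)
 * to the kernel log, and chain another read if more log data is
 * pending.
 */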
static void read_hw_log_done(struct rsxx_cardinfo *card,
                             struct creg_cmd *cmd,
                             int st)
{
        char *buf;
        char *log_str;
        int cnt;
        int len;
        int off;

        buf = cmd->buf;
        off = 0;

        /* Failed getting the log message */
        if (st)
                return;

        while (off < cmd->cnt8) {
                log_str = &card->log.buf[card->log.buf_len];
                cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
                len = substrncpy(log_str, &buf[off], cnt);

                off += len;
                card->log.buf_len += len;

                /*
                 * Flush the log if we've hit the end of a message or if we've
                 * run out of buffer space.
                 */
                if ((log_str[len - 1] == '\0') ||
                    (card->log.buf_len == LOG_BUF_SIZE8)) {
                        if (card->log.buf_len != 1) /* Don't log blank lines. */
                                hw_log_msg(card, card->log.buf,
                                           card->log.buf_len);
                        card->log.buf_len = 0;
                }
        }

        if (cmd->status & CREG_STAT_LOG_PENDING)
                rsxx_read_hw_log(card);
}

int rsxx_read_hw_log(struct rsxx_cardinfo *card)
{
        int st;

        st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
                            sizeof(card->log.tmp), card->log.tmp,
                            1, read_hw_log_done, NULL);
        if (st)
                dev_err(CARD_TO_DEV(card),
                        "Failed getting log text\n");

        return st;
}

/*-------------- IOCTL REG Access ------------------*/
static int issue_reg_cmd(struct rsxx_cardinfo *card,
                         struct rsxx_reg_access *cmd,
                         int read)
{
        unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;

        return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
                               cmd->stream, &cmd->stat);
}

int rsxx_reg_access(struct rsxx_cardinfo *card,
                    struct rsxx_reg_access __user *ucmd,
                    int read)
{
        struct rsxx_reg_access cmd;
        int st;

        st = copy_from_user(&cmd, ucmd, sizeof(cmd));
        if (st)
                return -EFAULT;

        if (cmd.cnt > RSXX_MAX_REG_CNT)
                return -EFAULT;

        st = issue_reg_cmd(card, &cmd, read);
        if (st)
                return st;

        st = put_user(cmd.stat, &ucmd->stat);
        if (st)
                return -EFAULT;

        if (read) {
                st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
                if (st)
                        return -EFAULT;
        }

        return 0;
}

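/*
 * EEH support: push the in-flight command (if any) back onto the head
 * of the queue so it is reissued once the card has been recovered.
 */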
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
{
        struct creg_cmd *cmd = NULL;

        cmd = card->creg_ctrl.active_cmd;
        card->creg_ctrl.active_cmd = NULL;

        if (cmd) {
                del_timer_sync(&card->creg_ctrl.cmd_timer);

                spin_lock_bh(&card->creg_ctrl.lock);
                list_add(&cmd->list, &card->creg_ctrl.queue);
                card->creg_ctrl.q_depth++;
                card->creg_ctrl.active = 0;
                spin_unlock_bh(&card->creg_ctrl.lock);
        }
}

void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
{
        spin_lock_bh(&card->creg_ctrl.lock);
        if (!list_empty(&card->creg_ctrl.queue))
                creg_kick_queue(card);
        spin_unlock_bh(&card->creg_ctrl.lock);
}

/*------------ Initialization & Setup --------------*/
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
        card->creg_ctrl.active_cmd = NULL;

        card->creg_ctrl.creg_wq =
                        create_singlethread_workqueue(DRIVER_NAME"_creg");
        if (!card->creg_ctrl.creg_wq)
                return -ENOMEM;

        INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
        mutex_init(&card->creg_ctrl.reset_lock);
        INIT_LIST_HEAD(&card->creg_ctrl.queue);
        spin_lock_init(&card->creg_ctrl.lock);
        timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);

        return 0;
}

void rsxx_creg_destroy(struct rsxx_cardinfo *card)
{
        struct creg_cmd *cmd;
        struct creg_cmd *tmp;
        int cnt = 0;

        /* Cancel outstanding commands */
        spin_lock_bh(&card->creg_ctrl.lock);
        list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
                list_del(&cmd->list);
                if (cmd->cb)
                        cmd->cb(card, cmd, -ECANCELED);
                kmem_cache_free(creg_cmd_pool, cmd);
                cnt++;
        }

        if (cnt)
                dev_info(CARD_TO_DEV(card),
                         "Canceled %d queued creg commands\n", cnt);

        cmd = card->creg_ctrl.active_cmd;
        card->creg_ctrl.active_cmd = NULL;
        if (cmd) {
                if (timer_pending(&card->creg_ctrl.cmd_timer))
                        del_timer_sync(&card->creg_ctrl.cmd_timer);

                if (cmd->cb)
                        cmd->cb(card, cmd, -ECANCELED);
                dev_info(CARD_TO_DEV(card),
                         "Canceled active creg command\n");
                kmem_cache_free(creg_cmd_pool, cmd);
        }
        spin_unlock_bh(&card->creg_ctrl.lock);

        cancel_work_sync(&card->creg_ctrl.done_work);
}


int rsxx_creg_init(void)
{
        creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
        if (!creg_cmd_pool)
                return -ENOMEM;

        return 0;
}

void rsxx_creg_cleanup(void)
{
        kmem_cache_destroy(creg_cmd_pool);
}