treewide: remove redundant IS_ERR() before error code check
[linux/fpc-iii.git] / drivers / block / rsxx / cregs.c
blob60ecd3f7cbd2ab14910b0f02320fc325e07b18f9
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Filename: cregs.c
5 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
6 * Philip Kelleher <pjk1939@linux.vnet.ibm.com>
8 * (C) Copyright 2013 IBM Corporation
9 */
11 #include <linux/completion.h>
12 #include <linux/slab.h>
14 #include "rsxx_priv.h"
16 #define CREG_TIMEOUT_MSEC 10000
/*
 * Completion callback invoked when a creg command finishes, times out,
 * or is cancelled; @st carries 0 or a negative errno.
 */
typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
			    struct creg_cmd *cmd,
			    int st);

/* One queued creg (control register) command. */
struct creg_cmd {
	struct list_head list;		/* link in creg_ctrl.queue */
	creg_cmd_cb cb;			/* completion callback; may be NULL */
	void *cb_private;		/* opaque cookie handed to cb */
	unsigned int op;		/* CREG_OP_READ or CREG_OP_WRITE */
	unsigned int addr;		/* creg address */
	int cnt8;			/* transfer length in bytes */
	void *buf;			/* data buffer, owned by the caller */
	unsigned int stream;		/* byte-stream mode: swap words on LE hosts */
	unsigned int status;		/* raw CREG_STAT value after completion */
};

/* Slab cache backing struct creg_cmd allocations (see rsxx_creg_init()). */
static struct kmem_cache *creg_cmd_pool;


/*------------ Private Functions --------------*/

#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianess!!! Aborting...
#endif
47 static int copy_to_creg_data(struct rsxx_cardinfo *card,
48 int cnt8,
49 void *buf,
50 unsigned int stream)
52 int i = 0;
53 u32 *data = buf;
55 if (unlikely(card->eeh_state))
56 return -EIO;
58 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
60 * Firmware implementation makes it necessary to byte swap on
61 * little endian processors.
63 if (LITTLE_ENDIAN && stream)
64 iowrite32be(data[i], card->regmap + CREG_DATA(i));
65 else
66 iowrite32(data[i], card->regmap + CREG_DATA(i));
69 return 0;
73 static int copy_from_creg_data(struct rsxx_cardinfo *card,
74 int cnt8,
75 void *buf,
76 unsigned int stream)
78 int i = 0;
79 u32 *data = buf;
81 if (unlikely(card->eeh_state))
82 return -EIO;
84 for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
86 * Firmware implementation makes it necessary to byte swap on
87 * little endian processors.
89 if (LITTLE_ENDIAN && stream)
90 data[i] = ioread32be(card->regmap + CREG_DATA(i));
91 else
92 data[i] = ioread32(card->regmap + CREG_DATA(i));
95 return 0;
/*
 * Program the hardware with @cmd and start it. Write data (if any) is
 * staged into the CREG data window first; the final write of the opcode
 * sets the valid bit, which kicks off execution. Bails out silently if
 * an EEH recovery begins at any point.
 */
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
	int st;

	if (unlikely(card->eeh_state))
		return;

	iowrite32(cmd->addr, card->regmap + CREG_ADD);
	iowrite32(cmd->cnt8, card->regmap + CREG_CNT);

	if (cmd->op == CREG_OP_WRITE) {
		if (cmd->buf) {
			st = copy_to_creg_data(card, cmd->cnt8,
					       cmd->buf, cmd->stream);
			/* EEH detected mid-copy; don't start the command. */
			if (st)
				return;
		}
	}

	if (unlikely(card->eeh_state))
		return;

	/* Setting the valid bit will kick off the command. */
	iowrite32(cmd->op, card->regmap + CREG_CMD);
}
/*
 * Pop the next queued command and issue it to hardware, if no command is
 * currently active. Must be called with creg_ctrl.lock held (every caller
 * in this file takes it first); 'active' serializes issuance.
 */
static void creg_kick_queue(struct rsxx_cardinfo *card)
{
	if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
		return;

	card->creg_ctrl.active = 1;
	card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
						      struct creg_cmd, list);
	list_del(&card->creg_ctrl.active_cmd->list);
	card->creg_ctrl.q_depth--;

	/*
	 * We have to set the timer before we push the new command. Otherwise,
	 * we could create a race condition that would occur if the timer
	 * was not canceled, and expired after the new command was pushed,
	 * but before the command was issued to hardware.
	 */
	mod_timer(&card->creg_ctrl.cmd_timer,
		  jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

	creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}
147 static int creg_queue_cmd(struct rsxx_cardinfo *card,
148 unsigned int op,
149 unsigned int addr,
150 unsigned int cnt8,
151 void *buf,
152 int stream,
153 creg_cmd_cb callback,
154 void *cb_private)
156 struct creg_cmd *cmd;
158 /* Don't queue stuff up if we're halted. */
159 if (unlikely(card->halt))
160 return -EINVAL;
162 if (card->creg_ctrl.reset)
163 return -EAGAIN;
165 if (cnt8 > MAX_CREG_DATA8)
166 return -EINVAL;
168 cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
169 if (!cmd)
170 return -ENOMEM;
172 INIT_LIST_HEAD(&cmd->list);
174 cmd->op = op;
175 cmd->addr = addr;
176 cmd->cnt8 = cnt8;
177 cmd->buf = buf;
178 cmd->stream = stream;
179 cmd->cb = callback;
180 cmd->cb_private = cb_private;
181 cmd->status = 0;
183 spin_lock_bh(&card->creg_ctrl.lock);
184 list_add_tail(&cmd->list, &card->creg_ctrl.queue);
185 card->creg_ctrl.q_depth++;
186 creg_kick_queue(card);
187 spin_unlock_bh(&card->creg_ctrl.lock);
189 return 0;
/*
 * Timer callback: the active command did not complete within
 * CREG_TIMEOUT_MSEC. Races with creg_cmd_done() to pop active_cmd; if
 * the done-work already claimed it, only the stat counter is bumped.
 * Otherwise the command is completed with -ETIMEDOUT and the queue is
 * restarted.
 */
static void creg_cmd_timed_out(struct timer_list *t)
{
	struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
	struct creg_cmd *cmd;

	spin_lock(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock(&card->creg_ctrl.lock);

	if (cmd == NULL) {
		/* Lost the race: completion work got here first. */
		card->creg_ctrl.creg_stats.creg_timeout++;
		dev_warn(CARD_TO_DEV(card),
			"No active command associated with timeout!\n");
		return;
	}

	if (cmd->cb)
		cmd->cb(card, cmd, -ETIMEDOUT);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.lock);
}
/*
 * Work handler run when hardware signals command completion. Cancels the
 * timeout timer, pops the active command (racing with the timer handler),
 * validates the hardware status, copies read data back to the caller's
 * buffer, invokes the completion callback, and kicks the next command.
 */
static void creg_cmd_done(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	struct creg_cmd *cmd;
	int st = 0;

	card = container_of(work, struct rsxx_cardinfo,
			    creg_ctrl.done_work);

	/*
	 * The timer could not be cancelled for some reason,
	 * race to pop the active command.
	 */
	if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
		card->creg_ctrl.creg_stats.failed_cancel_timer++;

	spin_lock_bh(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock_bh(&card->creg_ctrl.lock);

	if (cmd == NULL) {
		/* Timer handler already completed this command. */
		dev_err(CARD_TO_DEV(card),
			"Spurious creg interrupt!\n");
		return;
	}

	card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
	cmd->status = card->creg_ctrl.creg_stats.stat;
	if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
		dev_err(CARD_TO_DEV(card),
			"Invalid status on creg command\n");
		/*
		 * At this point we're probably reading garbage from HW. Don't
		 * do anything else that could mess up the system and let
		 * the sync function return an error.
		 */
		st = -EIO;
		goto creg_done;
	} else if (cmd->status & CREG_STAT_ERROR) {
		st = -EIO;
	}

	if (cmd->op == CREG_OP_READ) {
		unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);

		/* Paranoid Sanity Checks */
		if (!cmd->buf) {
			dev_err(CARD_TO_DEV(card),
				"Buffer not given for read.\n");
			st = -EIO;
			goto creg_done;
		}
		if (cnt8 != cmd->cnt8) {
			dev_err(CARD_TO_DEV(card),
				"count mismatch\n");
			st = -EIO;
			goto creg_done;
		}

		st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
	}

creg_done:
	if (cmd->cb)
		cmd->cb(card, cmd, st);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock_bh(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);
}
/*
 * Recover the creg interface after a catastrophic failure (e.g. the
 * completion mechanism stopped working, see __issue_creg_rw()). Disables
 * creg/event interrupts, cancels every queued and active command with
 * -ECANCELED, then re-enables interrupts. Serialized by reset_lock;
 * concurrent callers simply return.
 */
static void creg_reset(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;
	struct creg_cmd *tmp;
	unsigned long flags;

	/*
	 * mutex_trylock is used here because if reset_lock is taken then a
	 * reset is already happening. So, we can just go ahead and return.
	 */
	if (!mutex_trylock(&card->creg_ctrl.reset_lock))
		return;

	/* 'reset' makes creg_queue_cmd() reject new work with -EAGAIN. */
	card->creg_ctrl.reset = 1;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	dev_warn(CARD_TO_DEV(card),
		"Resetting creg interface for recovery\n");

	/* Cancel outstanding commands */
	spin_lock_bh(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		card->creg_ctrl.q_depth--;
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
	}

	/* Cancel the in-flight command, if any, and stop its timer. */
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);

		card->creg_ctrl.active = 0;
	}
	spin_unlock_bh(&card->creg_ctrl.lock);

	card->creg_ctrl.reset = 0;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	mutex_unlock(&card->creg_ctrl.reset_lock);
}
/* Used for synchronous accesses */
struct creg_completion {
	struct completion *cmd_done;	/* signalled by creg_cmd_done_cb() */
	int st;				/* status handed to the callback */
	u32 creg_status;		/* raw hardware status (cmd->status) */
};

/*
 * Completion callback bridging the async creg machinery to the
 * synchronous __issue_creg_rw() path: record the result and wake the
 * waiter. cb_private must point at a struct creg_completion.
 */
static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	struct creg_completion *cmd_completion;

	cmd_completion = cmd->cb_private;
	BUG_ON(!cmd_completion);

	cmd_completion->st = st;
	cmd_completion->creg_status = cmd->status;
	complete(cmd_completion->cmd_done);
}
/*
 * Issue a single creg command and wait synchronously for it to finish.
 * The raw hardware status is returned through @hw_stat; the function
 * returns 0 on success or a negative errno. If even the backstop wait
 * below times out, the creg interface itself is assumed broken and is
 * reset.
 */
static int __issue_creg_rw(struct rsxx_cardinfo *card,
			   unsigned int op,
			   unsigned int addr,
			   unsigned int cnt8,
			   void *buf,
			   int stream,
			   unsigned int *hw_stat)
{
	DECLARE_COMPLETION_ONSTACK(cmd_done);
	struct creg_completion completion;
	unsigned long timeout;
	int st;

	completion.cmd_done = &cmd_done;
	completion.st = 0;
	completion.creg_status = 0;

	st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
			    &completion);
	if (st)
		return st;

	/*
	 * This timeout is necessary for unresponsive hardware. It scales
	 * with the queue depth so every queued creg request ahead of ours
	 * has time to complete, plus an additional 20 seconds of slack.
	 */
	timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
				   card->creg_ctrl.q_depth + 20000);

	/*
	 * The creg interface is guaranteed to complete. It has a timeout
	 * mechanism that will kick in if hardware does not respond.
	 */
	st = wait_for_completion_timeout(completion.cmd_done, timeout);
	if (st == 0) {
		/*
		 * This is really bad, because the kernel timer did not
		 * expire and notify us of a timeout!
		 */
		dev_crit(CARD_TO_DEV(card),
			"cregs timer failed\n");
		creg_reset(card);
		return -EIO;
	}

	*hw_stat = completion.creg_status;

	if (completion.st) {
		/*
		 * This read is needed to verify that there has not been any
		 * extreme errors that might have occurred, i.e. EEH. The
		 * function iowrite32 will not detect EEH errors, so it is
		 * necessary that we recover if such an error is the reason
		 * for the timeout. This is a dummy read.
		 */
		ioread32(card->regmap + SCRATCH);

		dev_warn(CARD_TO_DEV(card),
			"creg command failed(%d x%08x)\n",
			completion.st, addr);
		return completion.st;
	}

	return 0;
}
438 static int issue_creg_rw(struct rsxx_cardinfo *card,
439 u32 addr,
440 unsigned int size8,
441 void *data,
442 int stream,
443 int read)
445 unsigned int hw_stat;
446 unsigned int xfer;
447 unsigned int op;
448 int st;
450 op = read ? CREG_OP_READ : CREG_OP_WRITE;
452 do {
453 xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);
455 st = __issue_creg_rw(card, op, addr, xfer,
456 data, stream, &hw_stat);
457 if (st)
458 return st;
460 data = (char *)data + xfer;
461 addr += xfer;
462 size8 -= xfer;
463 } while (size8);
465 return 0;
/* ---------------------------- Public API ---------------------------------- */

/* Write @size8 bytes from @data to creg address @addr. */
int rsxx_creg_write(struct rsxx_cardinfo *card,
			u32 addr,
			unsigned int size8,
			void *data,
			int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}

/* Read @size8 bytes from creg address @addr into @data. */
int rsxx_creg_read(struct rsxx_cardinfo *card,
			u32 addr,
			unsigned int size8,
			void *data,
			int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}

/* Fetch the card's current state word. Returns 0 or a negative errno. */
int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
	return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
			      sizeof(*state), state, 0);
}

/*
 * Fetch the card's capacity. Hardware reports it in RSXX_HW_BLK_SIZE
 * blocks; *size8 receives the size converted to bytes.
 */
int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
	unsigned int size;
	int st;

	st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
			    sizeof(size), &size, 0);
	if (st)
		return st;

	*size8 = (u64)size * RSXX_HW_BLK_SIZE;
	return 0;
}

/* Fetch the number of DMA targets the card exposes. */
int rsxx_get_num_targets(struct rsxx_cardinfo *card,
			 unsigned int *n_targets)
{
	return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
			      sizeof(*n_targets), n_targets, 0);
}

/* Fetch the card's capability bitmask. */
int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
			       u32 *capabilities)
{
	return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
			      sizeof(*capabilities), capabilities, 0);
}

/* Send a command word to the card via the CARD_CMD creg. */
int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
	return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
			       sizeof(cmd), &cmd, 0);
}
/*----------------- HW Log Functions -------------------*/

/*
 * Emit one hardware log fragment to the kernel log at the severity
 * encoded in the message itself. @str need not be NUL-terminated;
 * @len bounds the output via "%.*s". 'level' is static so a message
 * split across log buffers keeps the level of its first fragment.
 */
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level
	 */
	if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
		level = str[1];
		str += 3; /* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	default:
		/* Unknown/unset level: fall back to info. */
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}
/*
 * The substrncpy function copies the src string (which includes the
 * terminating '\0' character), up to the count into the dest pointer.
 * Returns the number of bytes copied to dest (including the '\0' when
 * one was reached within count).
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int remaining;

	for (remaining = count; remaining != 0; ) {
		char c = *src++;

		remaining--;
		*dest++ = c;
		if (c == '\0')
			break;
	}

	return count - remaining;
}
/*
 * Completion callback for the CREG_ADD_LOG read issued by
 * rsxx_read_hw_log(). Accumulates NUL-delimited message fragments from
 * cmd->buf into card->log.buf and flushes each complete message (or a
 * full buffer) to the kernel log via hw_log_msg(). Re-arms another log
 * read while hardware still has log data pending.
 */
static void read_hw_log_done(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	char *buf;
	char *log_str;
	int cnt;
	int len;
	int off;

	buf = cmd->buf;
	off = 0;

	/* Failed getting the log message */
	if (st)
		return;

	while (off < cmd->cnt8) {
		log_str = &card->log.buf[card->log.buf_len];
		cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
		len = substrncpy(log_str, &buf[off], cnt);

		off += len;
		card->log.buf_len += len;

		/*
		 * Flush the log if we've hit the end of a message or if we've
		 * run out of buffer space.
		 */
		if ((log_str[len - 1] == '\0') ||
		    (card->log.buf_len == LOG_BUF_SIZE8)) {
			if (card->log.buf_len != 1) /* Don't log blank lines. */
				hw_log_msg(card, card->log.buf,
					   card->log.buf_len);

			card->log.buf_len = 0;
		}
	}

	/* Hardware still has more log text queued; fetch the next chunk. */
	if (cmd->status & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
638 int rsxx_read_hw_log(struct rsxx_cardinfo *card)
640 int st;
642 st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
643 sizeof(card->log.tmp), card->log.tmp,
644 1, read_hw_log_done, NULL);
645 if (st)
646 dev_err(CARD_TO_DEV(card),
647 "Failed getting log text\n");
649 return st;
652 /*-------------- IOCTL REG Access ------------------*/
653 static int issue_reg_cmd(struct rsxx_cardinfo *card,
654 struct rsxx_reg_access *cmd,
655 int read)
657 unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;
659 return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
660 cmd->stream, &cmd->stat);
663 int rsxx_reg_access(struct rsxx_cardinfo *card,
664 struct rsxx_reg_access __user *ucmd,
665 int read)
667 struct rsxx_reg_access cmd;
668 int st;
670 st = copy_from_user(&cmd, ucmd, sizeof(cmd));
671 if (st)
672 return -EFAULT;
674 if (cmd.cnt > RSXX_MAX_REG_CNT)
675 return -EFAULT;
677 st = issue_reg_cmd(card, &cmd, read);
678 if (st)
679 return st;
681 st = put_user(cmd.stat, &ucmd->stat);
682 if (st)
683 return -EFAULT;
685 if (read) {
686 st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
687 if (st)
688 return -EFAULT;
691 return 0;
/*
 * EEH support: put the in-flight command (if any) back at the head of
 * the queue so it is re-issued after recovery, and stop its timeout
 * timer. active_cmd is read/cleared without the lock here — presumably
 * safe because interrupts are disabled during EEH handling, but that
 * depends on the caller's context (NOTE(review): confirm against the
 * EEH path in core.c).
 */
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;

	if (cmd) {
		del_timer_sync(&card->creg_ctrl.cmd_timer);

		spin_lock_bh(&card->creg_ctrl.lock);
		list_add(&cmd->list, &card->creg_ctrl.queue);
		card->creg_ctrl.q_depth++;
		card->creg_ctrl.active = 0;
		spin_unlock_bh(&card->creg_ctrl.lock);
	}
}
/*
 * Restart queue processing if any commands are waiting — used after the
 * interface was quiesced (e.g. commands re-queued by
 * rsxx_eeh_save_issued_creg()).
 */
void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
{
	spin_lock_bh(&card->creg_ctrl.lock);
	if (!list_empty(&card->creg_ctrl.queue))
		creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);
}
720 /*------------ Initialization & Setup --------------*/
721 int rsxx_creg_setup(struct rsxx_cardinfo *card)
723 card->creg_ctrl.active_cmd = NULL;
725 card->creg_ctrl.creg_wq =
726 create_singlethread_workqueue(DRIVER_NAME"_creg");
727 if (!card->creg_ctrl.creg_wq)
728 return -ENOMEM;
730 INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
731 mutex_init(&card->creg_ctrl.reset_lock);
732 INIT_LIST_HEAD(&card->creg_ctrl.queue);
733 spin_lock_init(&card->creg_ctrl.lock);
734 timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);
736 return 0;
/*
 * Per-card teardown: cancel every queued and active creg command with
 * -ECANCELED and flush the completion work.
 *
 * NOTE(review): del_timer_sync() is called while holding creg_ctrl.lock,
 * which creg_cmd_timed_out() also acquires — if the timer handler is
 * running and waiting on the lock this could deadlock; the
 * timer_pending() check narrows but does not close that window. Confirm
 * callers guarantee the timer cannot be firing here. Also, the
 * workqueue created in rsxx_creg_setup() is not destroyed in this
 * function — verify it is torn down elsewhere.
 */
void rsxx_creg_destroy(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd;
	struct creg_cmd *tmp;
	int cnt = 0;

	/* Cancel outstanding commands */
	spin_lock_bh(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
		cnt++;
	}

	if (cnt)
		dev_info(CARD_TO_DEV(card),
			"Canceled %d queue creg commands\n", cnt);

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		dev_info(CARD_TO_DEV(card),
			"Canceled active creg command\n");
		kmem_cache_free(creg_cmd_pool, cmd);
	}
	spin_unlock_bh(&card->creg_ctrl.lock);

	cancel_work_sync(&card->creg_ctrl.done_work);
}
777 int rsxx_creg_init(void)
779 creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
780 if (!creg_cmd_pool)
781 return -ENOMEM;
783 return 0;
/* Module-level cleanup: release the slab cache created by rsxx_creg_init(). */
void rsxx_creg_cleanup(void)
{
	kmem_cache_destroy(creg_cmd_pool);
}