// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	    Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 */

#include <linux/completion.h>
#include <linux/slab.h>

#include "rsxx_priv.h"

#define CREG_TIMEOUT_MSEC	10000
typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
			    struct creg_cmd *cmd,
			    int st);

struct creg_cmd {
	struct list_head list;
	creg_cmd_cb cb;
	void *cb_private;
	unsigned int op;
	unsigned int addr;
	int cnt8;
	void *buf;
	unsigned int stream;
	unsigned int status;
};

static struct kmem_cache *creg_cmd_pool;
/*------------ Private Functions --------------*/

#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianness! Aborting...
#endif
static int copy_to_creg_data(struct rsxx_cardinfo *card,
			     int cnt8,
			     void *buf,
			     unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	if (unlikely(card->eeh_state))
		return -EIO;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			iowrite32be(data[i], card->regmap + CREG_DATA(i));
		else
			iowrite32(data[i], card->regmap + CREG_DATA(i));
	}

	return 0;
}
static int copy_from_creg_data(struct rsxx_cardinfo *card,
			       int cnt8,
			       void *buf,
			       unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	if (unlikely(card->eeh_state))
		return -EIO;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			data[i] = ioread32be(card->regmap + CREG_DATA(i));
		else
			data[i] = ioread32(card->regmap + CREG_DATA(i));
	}

	return 0;
}
static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
	int st;

	if (unlikely(card->eeh_state))
		return;

	iowrite32(cmd->addr, card->regmap + CREG_ADD);
	iowrite32(cmd->cnt8, card->regmap + CREG_CNT);

	if (cmd->op == CREG_OP_WRITE) {
		st = copy_to_creg_data(card, cmd->cnt8,
				       cmd->buf, cmd->stream);
		if (st)
			return;
	}

	if (unlikely(card->eeh_state))
		return;

	/* Setting the valid bit will kick off the command. */
	iowrite32(cmd->op, card->regmap + CREG_CMD);
}
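/*
 * Summary of the creg hardware handshake used above: the target address is
 * written to CREG_ADD, the byte count to CREG_CNT, write payloads to the
 * CREG_DATA window, and finally the opcode to CREG_CMD, whose valid bit
 * starts the command.  Completion status is later read back from CREG_STAT
 * in creg_cmd_done() below.
 */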
static void creg_kick_queue(struct rsxx_cardinfo *card)
{
	if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
		return;

	card->creg_ctrl.active = 1;
	card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
						      struct creg_cmd, list);
	list_del(&card->creg_ctrl.active_cmd->list);
	card->creg_ctrl.q_depth--;

	/*
	 * We have to set the timer before we push the new command. Otherwise,
	 * we could create a race condition that would occur if the timer
	 * was not canceled, and expired after the new command was pushed,
	 * but before the command was issued to hardware.
	 */
	mod_timer(&card->creg_ctrl.cmd_timer,
		  jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

	creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}
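/*
 * Note: creg_kick_queue() assumes the caller already holds creg_ctrl.lock;
 * every caller in this file (creg_queue_cmd, creg_cmd_timed_out,
 * creg_cmd_done and rsxx_kick_creg_queue, all below) takes the lock around
 * the call.
 */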
static int creg_queue_cmd(struct rsxx_cardinfo *card,
			  unsigned int op,
			  unsigned int addr,
			  unsigned int cnt8,
			  void *buf,
			  int stream,
			  creg_cmd_cb callback,
			  void *cb_private)
{
	struct creg_cmd *cmd;

	/* Don't queue stuff up if we're halted. */
	if (unlikely(card->halt))
		return -EINVAL;

	if (card->creg_ctrl.reset)
		return -EAGAIN;

	if (cnt8 > MAX_CREG_DATA8)
		return -EINVAL;

	cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->list);

	cmd->op		= op;
	cmd->addr	= addr;
	cmd->cnt8	= cnt8;
	cmd->buf	= buf;
	cmd->stream	= stream;
	cmd->cb		= callback;
	cmd->cb_private	= cb_private;
	cmd->status	= 0;

	spin_lock_bh(&card->creg_ctrl.lock);
	list_add_tail(&cmd->list, &card->creg_ctrl.queue);
	card->creg_ctrl.q_depth++;
	creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);

	return 0;
}
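/*
 * A minimal sketch of asynchronous use (rsxx_read_hw_log() below is the real
 * in-tree caller): queue a read with a completion callback and return
 * immediately; the callback runs from creg_cmd_done() below once the card
 * signals completion.  my_done_cb and my_buf are hypothetical names used only
 * for illustration.
 *
 *	static void my_done_cb(struct rsxx_cardinfo *card,
 *			       struct creg_cmd *cmd, int st)
 *	{
 *		// cmd->buf holds the data when st == 0
 *	}
 *
 *	st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_CARD_STATE,
 *			    sizeof(u32), &my_buf, 0, my_done_cb, NULL);
 */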
static void creg_cmd_timed_out(struct timer_list *t)
{
	struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
	struct creg_cmd *cmd;

	spin_lock(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock(&card->creg_ctrl.lock);

	if (cmd == NULL) {
		card->creg_ctrl.creg_stats.creg_timeout++;
		dev_warn(CARD_TO_DEV(card),
			"No active command associated with timeout!\n");
		return;
	}

	if (cmd->cb)
		cmd->cb(card, cmd, -ETIMEDOUT);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.lock);
}
static void creg_cmd_done(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	struct creg_cmd *cmd;
	int st = 0;

	card = container_of(work, struct rsxx_cardinfo,
			    creg_ctrl.done_work);

	/*
	 * The timer could not be cancelled for some reason,
	 * race to pop the active command.
	 */
	if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
		card->creg_ctrl.creg_stats.failed_cancel_timer++;

	spin_lock_bh(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock_bh(&card->creg_ctrl.lock);

	if (cmd == NULL) {
		dev_err(CARD_TO_DEV(card),
			"Spurious creg interrupt!\n");
		return;
	}

	card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
	cmd->status = card->creg_ctrl.creg_stats.stat;
	if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
		dev_err(CARD_TO_DEV(card),
			"Invalid status on creg command\n");
		/*
		 * At this point we're probably reading garbage from HW. Don't
		 * do anything else that could mess up the system and let
		 * the sync function return an error.
		 */
		st = -EIO;
		goto creg_done;
	} else if (cmd->status & CREG_STAT_ERROR) {
		st = -EIO;
	}

	if (cmd->op == CREG_OP_READ) {
		unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);

		/* Paranoid Sanity Checks */
		if (!cmd->buf) {
			dev_err(CARD_TO_DEV(card),
				"Buffer not given for read.\n");
			st = -EIO;
			goto creg_done;
		}
		if (cnt8 != cmd->cnt8) {
			dev_err(CARD_TO_DEV(card),
				"Read count mismatch\n");
			st = -EIO;
			goto creg_done;
		}

		st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
	}

creg_done:
	if (cmd->cb)
		cmd->cb(card, cmd, st);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock_bh(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);
}
static void creg_reset(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;
	struct creg_cmd *tmp;
	unsigned long flags;

	/*
	 * mutex_trylock is used here because if reset_lock is taken then a
	 * reset is already happening. So, we can just go ahead and return.
	 */
	if (!mutex_trylock(&card->creg_ctrl.reset_lock))
		return;

	card->creg_ctrl.reset = 1;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	dev_warn(CARD_TO_DEV(card),
		"Resetting creg interface for recovery\n");

	/* Cancel outstanding commands */
	spin_lock_bh(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		card->creg_ctrl.q_depth--;
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
	}

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);

		card->creg_ctrl.active = 0;
	}
	spin_unlock_bh(&card->creg_ctrl.lock);

	card->creg_ctrl.reset = 0;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	mutex_unlock(&card->creg_ctrl.reset_lock);
}
/* Used for synchronous accesses */
struct creg_completion {
	struct completion	*cmd_done;
	int			st;
	u32			creg_status;
};

static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	struct creg_completion *cmd_completion;

	cmd_completion = cmd->cb_private;
	BUG_ON(!cmd_completion);

	cmd_completion->st = st;
	cmd_completion->creg_status = cmd->status;
	complete(cmd_completion->cmd_done);
}
static int __issue_creg_rw(struct rsxx_cardinfo *card,
			   unsigned int op,
			   unsigned int addr,
			   unsigned int cnt8,
			   void *buf,
			   int stream,
			   unsigned int *hw_stat)
{
	DECLARE_COMPLETION_ONSTACK(cmd_done);
	struct creg_completion completion;
	unsigned long timeout;
	int st;

	completion.cmd_done = &cmd_done;
	completion.st = 0;
	completion.creg_status = 0;

	st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
			    &completion);
	if (st)
		return st;

	/*
	 * This timeout is necessary for unresponsive hardware. The additional
	 * 20 seconds is used to guarantee that each creg request has time to
	 * complete.
	 */
	timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
				   card->creg_ctrl.q_depth + 20000);

	/*
	 * The creg interface is guaranteed to complete. It has a timeout
	 * mechanism that will kick in if hardware does not respond.
	 */
	st = wait_for_completion_timeout(completion.cmd_done, timeout);
	if (st == 0) {
		/*
		 * This is really bad, because the kernel timer did not
		 * expire and notify us of a timeout!
		 */
		dev_crit(CARD_TO_DEV(card),
			"cregs timer failed\n");
		creg_reset(card);
		return -EIO;
	}

	*hw_stat = completion.creg_status;

	if (completion.st) {
		/*
		 * This read is needed to verify that there has not been any
		 * extreme errors that might have occurred, i.e. EEH. The
		 * function iowrite32 will not detect EEH errors, so it is
		 * necessary that we recover if such an error is the reason
		 * for the timeout. This is a dummy read.
		 */
		ioread32(card->regmap + SCRATCH);

		dev_warn(CARD_TO_DEV(card),
			"creg command failed(%d x%08x)\n",
			completion.st, addr);
		return completion.st;
	}

	return 0;
}
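/*
 * Worked example of the timeout above (a sketch, assuming CREG_TIMEOUT_MSEC
 * stays at 10000): with three requests queued (q_depth == 3) the wait is
 * 3 * 10000 + 20000 = 50000 ms, i.e. 50 seconds before the interface is
 * declared unresponsive and reset.
 */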
static int issue_creg_rw(struct rsxx_cardinfo *card,
			 u32 addr,
			 unsigned int size8,
			 void *data,
			 int stream,
			 int read)
{
	unsigned int hw_stat;
	unsigned int xfer;
	unsigned int op;
	int st;

	op = read ? CREG_OP_READ : CREG_OP_WRITE;

	do {
		xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);

		st = __issue_creg_rw(card, op, addr, xfer,
				     data, stream, &hw_stat);
		if (st)
			return st;

		data = (char *)data + xfer;
		addr += xfer;
		size8 -= xfer;
	} while (size8);

	return 0;
}
/* ---------------------------- Public API ---------------------------------- */
int rsxx_creg_write(struct rsxx_cardinfo *card,
		    u32 addr,
		    unsigned int size8,
		    void *data,
		    int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}

int rsxx_creg_read(struct rsxx_cardinfo *card,
		   u32 addr,
		   unsigned int size8,
		   void *data,
		   int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}

int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
	return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
			      sizeof(*state), state, 0);
}
int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
	unsigned int size;
	int st;

	st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
			    sizeof(size), &size, 0);
	if (st)
		return st;

	*size8 = (u64)size * RSXX_HW_BLK_SIZE;
	return 0;
}

int rsxx_get_num_targets(struct rsxx_cardinfo *card,
			 unsigned int *n_targets)
{
	return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
			      sizeof(*n_targets), n_targets, 0);
}

int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
			       u32 *capabilities)
{
	return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
			      sizeof(*capabilities), capabilities, 0);
}

int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
	return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
			       sizeof(cmd), &cmd, 0);
}
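/*
 * A minimal synchronous usage sketch for the API above (the values and the
 * calling context are illustrative only):
 *
 *	unsigned int state;
 *	u64 size8;
 *
 *	if (!rsxx_get_card_state(card, &state) &&
 *	    !rsxx_get_card_size8(card, &size8))
 *		dev_info(CARD_TO_DEV(card), "state 0x%x, %llu bytes\n",
 *			 state, (unsigned long long)size8);
 *
 * Both calls block in __issue_creg_rw() until the creg command completes or
 * times out, so they must not be made from atomic context.
 */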
/*----------------- HW Log Functions -------------------*/
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level.
	 */
	if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
		level = str[1];
		str += 3; /* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	default:
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}
/*
 * The substrncpy function copies the src string (which includes the
 * terminating '\0' character), up to the count, into the dest pointer.
 * Returns the number of bytes copied to dest.
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int max_cnt = count;

	while (count) {
		count--;
		*dest = *src;
		if (*dest == '\0')
			break;
		src++;
		dest++;
	}
	return max_cnt - count;
}
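/*
 * Example of the semantics above (illustrative values): with src = "err\0"
 * and count = 8, substrncpy() copies 'e', 'r', 'r' and the terminating '\0'
 * and returns 4; with count = 2 it copies only "er" and returns 2, leaving
 * the rest of the message to be picked up by the next call.
 */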
static void read_hw_log_done(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	char *buf;
	char *log_str;
	int cnt;
	int len;
	int off;

	buf = cmd->buf;
	off = 0;

	/* Failed getting the log message */
	if (st)
		return;

	while (off < cmd->cnt8) {
		log_str = &card->log.buf[card->log.buf_len];
		cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
		len = substrncpy(log_str, &buf[off], cnt);

		off += len;
		card->log.buf_len += len;

		/*
		 * Flush the log if we've hit the end of a message or if we've
		 * run out of buffer space.
		 */
		if ((log_str[len - 1] == '\0') ||
		    (card->log.buf_len == LOG_BUF_SIZE8)) {
			if (card->log.buf_len != 1) /* Don't log blank lines. */
				hw_log_msg(card, card->log.buf,
					   card->log.buf_len);

			card->log.buf_len = 0;
		}
	}

	if (cmd->status & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}
int rsxx_read_hw_log(struct rsxx_cardinfo *card)
{
	int st;

	st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
			    sizeof(card->log.tmp), card->log.tmp,
			    1, read_hw_log_done, NULL);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed getting log text\n");

	return st;
}
/*-------------- IOCTL REG Access ------------------*/
static int issue_reg_cmd(struct rsxx_cardinfo *card,
			 struct rsxx_reg_access *cmd,
			 int read)
{
	unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;

	return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
			       cmd->stream, &cmd->stat);
}
int rsxx_reg_access(struct rsxx_cardinfo *card,
		    struct rsxx_reg_access __user *ucmd,
		    int read)
{
	struct rsxx_reg_access cmd;
	int st;

	st = copy_from_user(&cmd, ucmd, sizeof(cmd));
	if (st)
		return -EFAULT;

	if (cmd.cnt > RSXX_MAX_REG_CNT)
		return -EFAULT;

	st = issue_reg_cmd(card, &cmd, read);
	if (st)
		return st;

	st = put_user(cmd.stat, &ucmd->stat);
	if (st)
		return -EFAULT;

	if (read) {
		st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
		if (st)
			return -EFAULT;
	}

	return 0;
}
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;

	if (!cmd)
		return;

	del_timer_sync(&card->creg_ctrl.cmd_timer);

	spin_lock_bh(&card->creg_ctrl.lock);
	list_add(&cmd->list, &card->creg_ctrl.queue);
	card->creg_ctrl.q_depth++;
	card->creg_ctrl.active = 0;
	spin_unlock_bh(&card->creg_ctrl.lock);
}
void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
{
	spin_lock_bh(&card->creg_ctrl.lock);
	if (!list_empty(&card->creg_ctrl.queue))
		creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);
}
/*------------ Initialization & Setup --------------*/
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
	card->creg_ctrl.active_cmd = NULL;

	card->creg_ctrl.creg_wq =
			create_singlethread_workqueue(DRIVER_NAME "_creg");
	if (!card->creg_ctrl.creg_wq)
		return -ENOMEM;

	INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
	mutex_init(&card->creg_ctrl.reset_lock);
	INIT_LIST_HEAD(&card->creg_ctrl.queue);
	spin_lock_init(&card->creg_ctrl.lock);
	timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);

	return 0;
}
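/*
 * Setup/teardown ordering (inferred from this file, a reminder rather than a
 * guarantee): rsxx_creg_init() must run once at module load so creg_cmd_pool
 * exists before any creg_queue_cmd() call; rsxx_creg_setup() then initializes
 * the per-card queue, lock, work item and timer, and rsxx_creg_destroy() /
 * rsxx_creg_cleanup() undo them in the reverse order.
 */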
void rsxx_creg_destroy(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd;
	struct creg_cmd *tmp;
	int cnt = 0;

	/* Cancel outstanding commands */
	spin_lock_bh(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
		cnt++;
	}

	if (cnt)
		dev_info(CARD_TO_DEV(card),
			"Canceled %d queued creg commands\n", cnt);

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		dev_info(CARD_TO_DEV(card),
			"Canceled active creg command\n");
		kmem_cache_free(creg_cmd_pool, cmd);
	}
	spin_unlock_bh(&card->creg_ctrl.lock);

	cancel_work_sync(&card->creg_ctrl.done_work);
}
int rsxx_creg_init(void)
{
	creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
	if (!creg_cmd_pool)
		return -ENOMEM;

	return 0;
}
void rsxx_creg_cleanup(void)
{
	kmem_cache_destroy(creg_cmd_pool);
}