/*
 * Authors: Joshua Morris <josh.h.morris@us.ibm.com>
 *	Philip Kelleher <pjk1939@linux.vnet.ibm.com>
 *
 * (C) Copyright 2013 IBM Corporation
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of the
 * License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/completion.h>
#include <linux/slab.h>

#include "rsxx_priv.h"

#define CREG_TIMEOUT_MSEC	10000

typedef void (*creg_cmd_cb)(struct rsxx_cardinfo *card,
			    struct creg_cmd *cmd,
			    int st);

struct creg_cmd {
	struct list_head list;
	creg_cmd_cb cb;
	void *cb_private;
	unsigned int op;
	unsigned int addr;
	int cnt8;
	void *buf;
	unsigned int stream;
	unsigned int status;
};

static struct kmem_cache *creg_cmd_pool;

/*------------ Private Functions --------------*/

#if defined(__LITTLE_ENDIAN)
#define LITTLE_ENDIAN 1
#elif defined(__BIG_ENDIAN)
#define LITTLE_ENDIAN 0
#else
#error Unknown endianness!!! Aborting...
#endif

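/*
 * The two copy helpers below move creg data to/from the card's register
 * window one 32-bit word at a time; cnt8 counts bytes, so each loop
 * iteration advances one word while decrementing cnt8 by 4. Both bail
 * out early if the card is in an EEH error state.
 */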
static int copy_to_creg_data(struct rsxx_cardinfo *card,
			     int cnt8,
			     void *buf,
			     unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	if (unlikely(card->eeh_state))
		return -EIO;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			iowrite32be(data[i], card->regmap + CREG_DATA(i));
		else
			iowrite32(data[i], card->regmap + CREG_DATA(i));
	}

	return 0;
}

static int copy_from_creg_data(struct rsxx_cardinfo *card,
			       int cnt8,
			       void *buf,
			       unsigned int stream)
{
	int i = 0;
	u32 *data = buf;

	if (unlikely(card->eeh_state))
		return -EIO;

	for (i = 0; cnt8 > 0; i++, cnt8 -= 4) {
		/*
		 * Firmware implementation makes it necessary to byte swap on
		 * little endian processors.
		 */
		if (LITTLE_ENDIAN && stream)
			data[i] = ioread32be(card->regmap + CREG_DATA(i));
		else
			data[i] = ioread32(card->regmap + CREG_DATA(i));
	}

	return 0;
}

static void creg_issue_cmd(struct rsxx_cardinfo *card, struct creg_cmd *cmd)
{
	int st;

	if (unlikely(card->eeh_state))
		return;

	iowrite32(cmd->addr, card->regmap + CREG_ADD);
	iowrite32(cmd->cnt8, card->regmap + CREG_CNT);

	if (cmd->op == CREG_OP_WRITE) {
		if (cmd->buf == NULL)
			return;

		st = copy_to_creg_data(card, cmd->cnt8,
				       cmd->buf, cmd->stream);
		if (st)
			return;
	}

	if (unlikely(card->eeh_state))
		return;

	/* Setting the valid bit will kick off the command. */
	iowrite32(cmd->op, card->regmap + CREG_CMD);
}

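/*
 * Only one creg command may be outstanding on the hardware at a time.
 * creg_kick_queue() enforces that: it pops the next queued command,
 * arms the timeout timer, and only then touches the hardware. Callers
 * must hold creg_ctrl.lock.
 */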
static void creg_kick_queue(struct rsxx_cardinfo *card)
{
	if (card->creg_ctrl.active || list_empty(&card->creg_ctrl.queue))
		return;

	card->creg_ctrl.active = 1;
	card->creg_ctrl.active_cmd = list_first_entry(&card->creg_ctrl.queue,
						      struct creg_cmd, list);
	list_del(&card->creg_ctrl.active_cmd->list);
	card->creg_ctrl.q_depth--;

	/*
	 * We have to set the timer before we push the new command. Otherwise,
	 * we could create a race condition that would occur if the timer
	 * was not canceled, and expired after the new command was pushed,
	 * but before the command was issued to hardware.
	 */
	mod_timer(&card->creg_ctrl.cmd_timer,
		  jiffies + msecs_to_jiffies(CREG_TIMEOUT_MSEC));

	creg_issue_cmd(card, card->creg_ctrl.active_cmd);
}

static int creg_queue_cmd(struct rsxx_cardinfo *card,
			  unsigned int op,
			  unsigned int addr,
			  unsigned int cnt8,
			  void *buf,
			  int stream,
			  creg_cmd_cb callback,
			  void *cb_private)
{
	struct creg_cmd *cmd;

	/* Don't queue stuff up if we're halted. */
	if (unlikely(card->halt))
		return -EINVAL;

	if (card->creg_ctrl.reset)
		return -EAGAIN;

	if (cnt8 > MAX_CREG_DATA8)
		return -EINVAL;

	cmd = kmem_cache_alloc(creg_cmd_pool, GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	INIT_LIST_HEAD(&cmd->list);

	cmd->op		= op;
	cmd->addr	= addr;
	cmd->cnt8	= cnt8;
	cmd->buf	= buf;
	cmd->stream	= stream;
	cmd->cb		= callback;
	cmd->cb_private	= cb_private;
	cmd->status	= 0;

	spin_lock_bh(&card->creg_ctrl.lock);
	list_add_tail(&cmd->list, &card->creg_ctrl.queue);
	card->creg_ctrl.q_depth++;
	creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);

	return 0;
}

static void creg_cmd_timed_out(struct timer_list *t)
{
	struct rsxx_cardinfo *card = from_timer(card, t, creg_ctrl.cmd_timer);
	struct creg_cmd *cmd;

	spin_lock(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock(&card->creg_ctrl.lock);

	if (cmd == NULL) {
		card->creg_ctrl.creg_stats.creg_timeout++;
		dev_warn(CARD_TO_DEV(card),
			 "No active command associated with timeout!\n");
		return;
	}

	if (cmd->cb)
		cmd->cb(card, cmd, -ETIMEDOUT);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock(&card->creg_ctrl.lock);
}

static void creg_cmd_done(struct work_struct *work)
{
	struct rsxx_cardinfo *card;
	struct creg_cmd *cmd;
	int st = 0;

	card = container_of(work, struct rsxx_cardinfo,
			    creg_ctrl.done_work);

	/*
	 * The timer could not be cancelled for some reason,
	 * race to pop the active command.
	 */
	if (del_timer_sync(&card->creg_ctrl.cmd_timer) == 0)
		card->creg_ctrl.creg_stats.failed_cancel_timer++;

	spin_lock_bh(&card->creg_ctrl.lock);
	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	spin_unlock_bh(&card->creg_ctrl.lock);

	if (cmd == NULL) {
		dev_err(CARD_TO_DEV(card),
			"Spurious creg interrupt!\n");
		return;
	}

	card->creg_ctrl.creg_stats.stat = ioread32(card->regmap + CREG_STAT);
	cmd->status = card->creg_ctrl.creg_stats.stat;
	if ((cmd->status & CREG_STAT_STATUS_MASK) == 0) {
		dev_err(CARD_TO_DEV(card),
			"Invalid status on creg command\n");
		/*
		 * At this point we're probably reading garbage from HW. Don't
		 * do anything else that could mess up the system and let
		 * the sync function return an error.
		 */
		st = -EIO;
		goto creg_done;
	} else if (cmd->status & CREG_STAT_ERROR) {
		st = -EIO;
	}

	if (cmd->op == CREG_OP_READ) {
		unsigned int cnt8 = ioread32(card->regmap + CREG_CNT);

		/* Paranoid Sanity Checks */
		if (!cmd->buf) {
			dev_err(CARD_TO_DEV(card),
				"Buffer not given for read.\n");
			st = -EIO;
			goto creg_done;
		}
		if (cnt8 != cmd->cnt8) {
			dev_err(CARD_TO_DEV(card),
				"count mismatch\n");
			st = -EIO;
			goto creg_done;
		}

		st = copy_from_creg_data(card, cnt8, cmd->buf, cmd->stream);
	}

creg_done:
	if (cmd->cb)
		cmd->cb(card, cmd, st);

	kmem_cache_free(creg_cmd_pool, cmd);

	spin_lock_bh(&card->creg_ctrl.lock);
	card->creg_ctrl.active = 0;
	creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);
}

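/*
 * creg_reset() is the recovery path of last resort. It is invoked from
 * __issue_creg_rw() when a synchronous wait times out even though the
 * per-command timer never fired, which suggests the interface is wedged.
 */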
static void creg_reset(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;
	struct creg_cmd *tmp;
	unsigned long flags;

	/*
	 * mutex_trylock is used here because if reset_lock is taken then a
	 * reset is already happening. So, we can just go ahead and return.
	 */
	if (!mutex_trylock(&card->creg_ctrl.reset_lock))
		return;

	card->creg_ctrl.reset = 1;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_disable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	dev_warn(CARD_TO_DEV(card),
		 "Resetting creg interface for recovery\n");

	/* Cancel outstanding commands */
	spin_lock_bh(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		card->creg_ctrl.q_depth--;
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
	}

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);

		card->creg_ctrl.active = 0;
	}
	spin_unlock_bh(&card->creg_ctrl.lock);

	card->creg_ctrl.reset = 0;
	spin_lock_irqsave(&card->irq_lock, flags);
	rsxx_enable_ier_and_isr(card, CR_INTR_CREG | CR_INTR_EVENT);
	spin_unlock_irqrestore(&card->irq_lock, flags);

	mutex_unlock(&card->creg_ctrl.reset_lock);
}

/* Used for synchronous accesses */
struct creg_completion {
	struct completion	*cmd_done;
	int			st;
	u32			creg_status;
};

static void creg_cmd_done_cb(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	struct creg_completion *cmd_completion;

	cmd_completion = cmd->cb_private;
	BUG_ON(!cmd_completion);

	cmd_completion->st = st;
	cmd_completion->creg_status = cmd->status;
	complete(cmd_completion->cmd_done);
}

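/*
 * __issue_creg_rw() layers a synchronous call on top of the async queue:
 * it submits the command with creg_cmd_done_cb() as the completion
 * callback, then sleeps on a stack-allocated completion until the
 * callback fires or the (deliberately generous) timeout below expires.
 */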
static int __issue_creg_rw(struct rsxx_cardinfo *card,
			   unsigned int op,
			   unsigned int addr,
			   unsigned int cnt8,
			   void *buf,
			   int stream,
			   unsigned int *hw_stat)
{
	DECLARE_COMPLETION_ONSTACK(cmd_done);
	struct creg_completion completion;
	unsigned long timeout;
	int st;

	completion.cmd_done = &cmd_done;
	completion.st = 0;
	completion.creg_status = 0;

	st = creg_queue_cmd(card, op, addr, cnt8, buf, stream, creg_cmd_done_cb,
			    &completion);
	if (st)
		return st;

	/*
	 * This timeout is necessary for unresponsive hardware. The additional
	 * 20 seconds is used to guarantee that each creg request has time to
	 * complete.
	 */
	timeout = msecs_to_jiffies(CREG_TIMEOUT_MSEC *
				   card->creg_ctrl.q_depth + 20000);

	/*
	 * The creg interface is guaranteed to complete. It has a timeout
	 * mechanism that will kick in if hardware does not respond.
	 */
	st = wait_for_completion_timeout(completion.cmd_done, timeout);
	if (st == 0) {
		/*
		 * This is really bad, because the kernel timer did not
		 * expire and notify us of a timeout!
		 */
		dev_crit(CARD_TO_DEV(card),
			 "cregs timer failed\n");
		creg_reset(card);
		return -EIO;
	}

	*hw_stat = completion.creg_status;

	if (completion.st) {
		/*
		 * This read is needed to verify that there has not been any
		 * extreme errors that might have occurred, i.e. EEH. The
		 * function iowrite32 will not detect EEH errors, so it is
		 * necessary that we recover if such an error is the reason
		 * for the timeout. This is a dummy read.
		 */
		ioread32(card->regmap + SCRATCH);

		dev_warn(CARD_TO_DEV(card),
			 "creg command failed(%d x%08x)\n",
			 completion.st, addr);
		return completion.st;
	}

	return 0;
}

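/*
 * issue_creg_rw() splits an arbitrarily sized transfer into
 * MAX_CREG_DATA8-byte chunks and issues one synchronous creg command per
 * chunk, advancing the buffer pointer and register address as it goes.
 */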
static int issue_creg_rw(struct rsxx_cardinfo *card,
			 u32 addr,
			 unsigned int size8,
			 void *data,
			 int stream,
			 int read)
{
	unsigned int hw_stat;
	unsigned int xfer;
	unsigned int op;
	int st;

	op = read ? CREG_OP_READ : CREG_OP_WRITE;

	do {
		xfer = min_t(unsigned int, size8, MAX_CREG_DATA8);

		st = __issue_creg_rw(card, op, addr, xfer,
				     data, stream, &hw_stat);
		if (st)
			return st;

		data   = (char *)data + xfer;
		addr  += xfer;
		size8 -= xfer;
	} while (size8);

	return 0;
}

/* ---------------------------- Public API ---------------------------------- */
int rsxx_creg_write(struct rsxx_cardinfo *card,
		    u32 addr,
		    unsigned int size8,
		    void *data,
		    int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 0);
}

int rsxx_creg_read(struct rsxx_cardinfo *card,
		   u32 addr,
		   unsigned int size8,
		   void *data,
		   int byte_stream)
{
	return issue_creg_rw(card, addr, size8, data, byte_stream, 1);
}

int rsxx_get_card_state(struct rsxx_cardinfo *card, unsigned int *state)
{
	return rsxx_creg_read(card, CREG_ADD_CARD_STATE,
			      sizeof(*state), state, 0);
}

int rsxx_get_card_size8(struct rsxx_cardinfo *card, u64 *size8)
{
	unsigned int size;
	int st;

	st = rsxx_creg_read(card, CREG_ADD_CARD_SIZE,
			    sizeof(size), &size, 0);
	if (st)
		return st;

	*size8 = (u64)size * RSXX_HW_BLK_SIZE;
	return 0;
}

int rsxx_get_num_targets(struct rsxx_cardinfo *card,
			 unsigned int *n_targets)
{
	return rsxx_creg_read(card, CREG_ADD_NUM_TARGETS,
			      sizeof(*n_targets), n_targets, 0);
}

int rsxx_get_card_capabilities(struct rsxx_cardinfo *card,
			       u32 *capabilities)
{
	return rsxx_creg_read(card, CREG_ADD_CAPABILITIES,
			      sizeof(*capabilities), capabilities, 0);
}

int rsxx_issue_card_cmd(struct rsxx_cardinfo *card, u32 cmd)
{
	return rsxx_creg_write(card, CREG_ADD_CARD_CMD,
			       sizeof(cmd), &cmd, 0);
}

/*----------------- HW Log Functions -------------------*/
static void hw_log_msg(struct rsxx_cardinfo *card, const char *str, int len)
{
	static char level;

	/*
	 * New messages start with "<#>", where # is the log level. Messages
	 * that extend past the log buffer will use the previous level.
	 */
	if ((len > 3) && (str[0] == '<') && (str[2] == '>')) {
		level = str[1];
		str += 3; /* Skip past the log level. */
		len -= 3;
	}

	switch (level) {
	case '0':
		dev_emerg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '1':
		dev_alert(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '2':
		dev_crit(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '3':
		dev_err(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '4':
		dev_warn(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '5':
		dev_notice(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '6':
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	case '7':
		dev_dbg(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	default:
		dev_info(CARD_TO_DEV(card), "HW: %.*s", len, str);
		break;
	}
}

/*
 * The substrncpy function copies up to count bytes of the src string
 * (including the terminating '\0' character) into the dest pointer.
 * Returns the number of bytes copied to dest.
 */
static int substrncpy(char *dest, const char *src, int count)
{
	int max_cnt = count;

	while (count) {
		count--;
		*dest = *src;
		if (*dest == '\0')
			break;
		src++;
		dest++;
	}
	return max_cnt - count;
}

static void read_hw_log_done(struct rsxx_cardinfo *card,
			     struct creg_cmd *cmd,
			     int st)
{
	char *buf;
	char *log_str;
	int cnt;
	int len;
	int off;

	buf = cmd->buf;
	off = 0;

	/* Failed getting the log message */
	if (st)
		return;

	while (off < cmd->cnt8) {
		log_str = &card->log.buf[card->log.buf_len];
		cnt = min(cmd->cnt8 - off, LOG_BUF_SIZE8 - card->log.buf_len);
		len = substrncpy(log_str, &buf[off], cnt);

		off += len;
		card->log.buf_len += len;

		/*
		 * Flush the log if we've hit the end of a message or if we've
		 * run out of buffer space.
		 */
		if ((log_str[len - 1] == '\0') ||
		    (card->log.buf_len == LOG_BUF_SIZE8)) {
			if (card->log.buf_len != 1) /* Don't log blank lines. */
				hw_log_msg(card, card->log.buf,
					   card->log.buf_len);
			card->log.buf_len = 0;
		}
	}

	if (cmd->status & CREG_STAT_LOG_PENDING)
		rsxx_read_hw_log(card);
}

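/*
 * Note that read_hw_log_done() re-queues another read while the hardware
 * still reports CREG_STAT_LOG_PENDING, so a single call to
 * rsxx_read_hw_log() drains the entire pending hardware log.
 */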
int rsxx_read_hw_log(struct rsxx_cardinfo *card)
{
	int st;

	st = creg_queue_cmd(card, CREG_OP_READ, CREG_ADD_LOG,
			    sizeof(card->log.tmp), card->log.tmp,
			    1, read_hw_log_done, NULL);
	if (st)
		dev_err(CARD_TO_DEV(card),
			"Failed getting log text\n");

	return st;
}

/*-------------- IOCTL REG Access ------------------*/
static int issue_reg_cmd(struct rsxx_cardinfo *card,
			 struct rsxx_reg_access *cmd,
			 int read)
{
	unsigned int op = read ? CREG_OP_READ : CREG_OP_WRITE;

	return __issue_creg_rw(card, op, cmd->addr, cmd->cnt, cmd->data,
			       cmd->stream, &cmd->stat);
}

int rsxx_reg_access(struct rsxx_cardinfo *card,
		    struct rsxx_reg_access __user *ucmd,
		    int read)
{
	struct rsxx_reg_access cmd;
	int st;

	st = copy_from_user(&cmd, ucmd, sizeof(cmd));
	if (st)
		return -EFAULT;

	if (cmd.cnt > RSXX_MAX_REG_CNT)
		return -EFAULT;

	st = issue_reg_cmd(card, &cmd, read);
	if (st)
		return st;

	st = put_user(cmd.stat, &ucmd->stat);
	if (st)
		return -EFAULT;

	if (read) {
		st = copy_to_user(ucmd->data, cmd.data, cmd.cnt);
		if (st)
			return -EFAULT;
	}

	return 0;
}

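/*
 * EEH (PCI error recovery) support: when the slot freezes, the command
 * that was on the hardware will never complete. rsxx_eeh_save_issued_creg()
 * pushes it back onto the head of the queue so it is reissued once the
 * slot recovers, and rsxx_kick_creg_queue() restarts the engine.
 */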
void rsxx_eeh_save_issued_creg(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd = NULL;

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;

	if (cmd) {
		del_timer_sync(&card->creg_ctrl.cmd_timer);

		spin_lock_bh(&card->creg_ctrl.lock);
		list_add(&cmd->list, &card->creg_ctrl.queue);
		card->creg_ctrl.q_depth++;
		card->creg_ctrl.active = 0;
		spin_unlock_bh(&card->creg_ctrl.lock);
	}
}

void rsxx_kick_creg_queue(struct rsxx_cardinfo *card)
{
	spin_lock_bh(&card->creg_ctrl.lock);
	if (!list_empty(&card->creg_ctrl.queue))
		creg_kick_queue(card);
	spin_unlock_bh(&card->creg_ctrl.lock);
}

/*------------ Initialization & Setup --------------*/
int rsxx_creg_setup(struct rsxx_cardinfo *card)
{
	card->creg_ctrl.active_cmd = NULL;

	card->creg_ctrl.creg_wq =
			create_singlethread_workqueue(DRIVER_NAME"_creg");
	if (!card->creg_ctrl.creg_wq)
		return -ENOMEM;

	INIT_WORK(&card->creg_ctrl.done_work, creg_cmd_done);
	mutex_init(&card->creg_ctrl.reset_lock);
	INIT_LIST_HEAD(&card->creg_ctrl.queue);
	spin_lock_init(&card->creg_ctrl.lock);
	timer_setup(&card->creg_ctrl.cmd_timer, creg_cmd_timed_out, 0);

	return 0;
}

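/*
 * Teardown mirrors creg_reset(): cancel everything still queued, then the
 * active command, and finally flush the completion work item so no
 * callback can run after the card is torn down.
 */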
void rsxx_creg_destroy(struct rsxx_cardinfo *card)
{
	struct creg_cmd *cmd;
	struct creg_cmd *tmp;
	int cnt = 0;

	/* Cancel outstanding commands */
	spin_lock_bh(&card->creg_ctrl.lock);
	list_for_each_entry_safe(cmd, tmp, &card->creg_ctrl.queue, list) {
		list_del(&cmd->list);
		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		kmem_cache_free(creg_cmd_pool, cmd);
		cnt++;
	}

	if (cnt)
		dev_info(CARD_TO_DEV(card),
			 "Canceled %d queued creg commands\n", cnt);

	cmd = card->creg_ctrl.active_cmd;
	card->creg_ctrl.active_cmd = NULL;
	if (cmd) {
		if (timer_pending(&card->creg_ctrl.cmd_timer))
			del_timer_sync(&card->creg_ctrl.cmd_timer);

		if (cmd->cb)
			cmd->cb(card, cmd, -ECANCELED);
		dev_info(CARD_TO_DEV(card),
			 "Canceled active creg command\n");
		kmem_cache_free(creg_cmd_pool, cmd);
	}
	spin_unlock_bh(&card->creg_ctrl.lock);

	cancel_work_sync(&card->creg_ctrl.done_work);
}

int rsxx_creg_init(void)
{
	creg_cmd_pool = KMEM_CACHE(creg_cmd, SLAB_HWCACHE_ALIGN);
	if (!creg_cmd_pool)
		return -ENOMEM;

	return 0;
}

void rsxx_creg_cleanup(void)
{
	kmem_cache_destroy(creg_cmd_pool);
}