/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * Fibre Channel exchange and sequence handling.
 */
#include <linux/timer.h>
#include <linux/gfp.h>
#include <linux/err.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
static struct kmem_cache *fc_em_cachep;        /* cache for exchanges */
/*
 * Structure and function definitions for managing Fibre Channel Exchanges
 * and Sequences.
 *
 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
 *
 * fc_exch_mgr holds the exchange state for an N port
 *
 * fc_exch holds state for one exchange and links to its active sequence.
 *
 * fc_seq holds the state for an individual sequence.
 */

/*
 * Exchange manager.
 *
 * This structure is the center for creating exchanges and sequences.
 * It manages the allocation of exchange IDs.
 */
struct fc_exch_mgr {
        enum fc_class   class;          /* default class for sequences */
        spinlock_t      em_lock;        /* exchange manager lock,
                                           must be taken before ex_lock */
        u16             last_xid;       /* last allocated exchange ID */
        u16             min_xid;        /* min exchange ID */
        u16             max_xid;        /* max exchange ID */
        u16             max_read;       /* max exchange ID for read */
        u16             last_read;      /* last xid allocated for read */
        u32             total_exches;   /* total allocated exchanges */
        struct list_head        ex_list;        /* allocated exchanges list */
        struct fc_lport *lp;            /* fc device instance */
        mempool_t       *ep_pool;       /* reserve ep's */

        /*
         * Currently, exchange manager stats are updated but not used.
         * Either expose the stats via sysfs or remove them
         * altogether if they remain unused. XXX
         */
        struct {
                atomic_t no_free_exch;
                atomic_t no_free_exch_xid;
                atomic_t xid_not_found;
                atomic_t xid_busy;
                atomic_t seq_not_found;
                atomic_t non_bls_resp;
        } stats;
        struct fc_exch **exches;        /* for exch pointers indexed by xid */
};
#define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
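/*
 * Illustrative note (not part of the original source): each fc_exch embeds
 * its fc_seq directly, so fc_seq_exch() recovers the owning exchange from a
 * sequence pointer via container_of():
 *
 *      struct fc_exch *ep = fc_seq_exch(sp);   // here sp == &ep->seq
 */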
static void fc_exch_rrq(struct fc_exch *);
static void fc_seq_ls_acc(struct fc_seq *);
static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
                          enum fc_els_rjt_explan);
static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
/*
 * Internal implementation notes.
 *
 * There is one exchange manager by default in libfc, but an LLD may choose
 * to have one per CPU. The sequence manager is one per exchange manager
 * and currently never separated.
 *
 * Section 9.8 in FC-FS-2 specifies:  "The SEQ_ID is a one-byte field
 * assigned by the Sequence Initiator that shall be unique for a specific
 * D_ID and S_ID pair while the Sequence is open."  Note that it isn't
 * qualified by exchange ID, which one might think it would be.
 * In practice this limits the number of open sequences and exchanges to 256
 * per session.  For most targets we could treat this limit as per exchange.
 *
 * The exchange and its sequence are freed when the last sequence is received.
 * It's possible for the remote port to leave an exchange open without
 * sending any sequences.
 *
 * Notes on reference counts:
 *
 * Exchanges are reference counted and an exchange is freed when its reference
 * count becomes zero.
 *
 * Timeouts:
 * Sequences are timed out for E_D_TOV and R_A_TOV.
 *
 * Sequence event handling:
 *
 * The following events may occur on initiator sequences:
 *
 *      Send.
 *          For now, the whole thing is sent.
 *      Receipt of ACK.
 *          This applies only to class F.
 *          The sequence is marked complete.
 *      ULP completion.
 *          The upper layer calls fc_exch_done() when done
 *          with the exchange and sequence tuple.
 *      RX-inferred completion.
 *          When we receive the next sequence on the same exchange, we can
 *          retire the previous sequence ID.  (XXX not implemented).
 *      Timeout.
 *          R_A_TOV frees the sequence ID.  If we're waiting for ACK,
 *          E_D_TOV causes abort and calls the upper layer response handler
 *          with an FC_EX_TIMEOUT error.
 *
 * The following events may occur on recipient sequences:
 *
 *      Receipt of frame.
 *          Allocate sequence for first frame received.
 *          Hold during receive handler.
 *          Release when final frame received.
 *          Keep status of last N of these for the ELS RES command.  XXX TBD.
 *      Receipt of ABTS.
 *          Deallocate sequence.
 *
 * For now, we neglect conditions where only part of a sequence was
 * received or transmitted, or where out-of-order receipt is detected.
 */
/*
 * Locking notes:
 *
 * The EM code runs in a per-CPU worker thread.
 *
 * To protect against concurrency between a worker thread code and timers,
 * sequence allocation and deallocation must be locked.
 *  - exchange refcnt can be done atomically without locks.
 *  - sequence allocation must be locked by exch lock.
 *  - If the em_lock and ex_lock must be taken at the same time, then the
 *    em_lock must be taken before the ex_lock.
 */
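/*
 * A minimal sketch of the ordering rule above (illustrative only, not part
 * of the original code): when both locks are needed, take the manager lock
 * first, then the exchange lock, and release them in the reverse order.
 *
 *      spin_lock_bh(&mp->em_lock);             // manager lock first
 *      spin_lock_bh(&ep->ex_lock);             // then the exchange lock
 *      ... update both the manager and the exchange ...
 *      spin_unlock_bh(&ep->ex_lock);
 *      spin_unlock_bh(&mp->em_lock);
 */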
/*
 * opcode names for debugging.
 */
static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;

#define FC_TABLE_SIZE(x)   (sizeof(x) / sizeof(x[0]))
static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
                                              unsigned int max_index)
{
        const char *name = NULL;

        if (op < max_index)
                name = table[op];
        if (!name)
                name = "unknown";
        return name;
}
static const char *fc_exch_rctl_name(unsigned int op)
{
        return fc_exch_name_lookup(op, fc_exch_rctl_names,
                                   FC_TABLE_SIZE(fc_exch_rctl_names));
}
/*
 * Hold an exchange - keep it from being freed.
 */
static void fc_exch_hold(struct fc_exch *ep)
{
        atomic_inc(&ep->ex_refcnt);
}
/*
 * Set up the FC header by initializing a few more FC header fields and
 * the sof/eof. Fields initialized by this function:
 *      - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
 */
static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
                              u32 f_ctl)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        u16 fill;

        fr_sof(fp) = ep->class;
        if (ep->seq.cnt)
                fr_sof(fp) = fc_sof_normal(ep->class);

        if (f_ctl & FC_FC_END_SEQ) {
                fr_eof(fp) = FC_EOF_T;
                if (fc_sof_needs_ack(ep->class))
                        fr_eof(fp) = FC_EOF_N;
                /*
                 * The number of fill bytes to make the length a 4-byte
                 * multiple is the low order 2-bits of the f_ctl.
                 * The fill itself will have been cleared by the frame
                 * allocation.
                 * After this, the length will be even, as expected by
                 * the transport.
                 */
                fill = fr_len(fp) & 3;
                if (fill) {
                        fill = 4 - fill;
                        /* TODO, this may be a problem with fragmented skb */
                        skb_put(fp_skb(fp), fill);
                        hton24(fh->fh_f_ctl, f_ctl | fill);
                }
        } else {
                WARN_ON(fr_len(fp) % 4 != 0);   /* no pad to non last frame */
                fr_eof(fp) = FC_EOF_N;
        }

        /*
         * Initialize remaining fh fields
         * from fc_fill_fc_hdr
         */
        fh->fh_ox_id = htons(ep->oxid);
        fh->fh_rx_id = htons(ep->rxid);
        fh->fh_seq_id = ep->seq.id;
        fh->fh_seq_cnt = htons(ep->seq.cnt);
}
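/*
 * Worked example of the fill computation above (illustrative only, not part
 * of the original source): if fr_len(fp) is 29, then fr_len(fp) & 3 is 1 and
 * fill becomes 4 - 1 = 3, so three pad bytes are appended, the frame length
 * becomes 32, and the fill count is carried in the low two bits of f_ctl.
 */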
/*
 * Release a reference to an exchange.
 * If the refcnt goes to zero and the exchange is complete, it is freed.
 */
static void fc_exch_release(struct fc_exch *ep)
{
        struct fc_exch_mgr *mp;

        if (atomic_dec_and_test(&ep->ex_refcnt)) {
                mp = ep->em;
                if (ep->destructor)
                        ep->destructor(&ep->seq, ep->arg);
                if (ep->lp->tt.exch_put)
                        ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
                WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
                mempool_free(ep, mp->ep_pool);
        }
}
static int fc_exch_done_locked(struct fc_exch *ep)
{
        int rc = 1;

        /*
         * We must check for completion in case there are two threads
         * trying to complete this. But the rrq code will reuse the
         * ep, and in that case we only clear the resp and set it as
         * complete, so it can be reused by the timer to send the rrq.
         */
        if (ep->state & FC_EX_DONE)
                return rc;
        ep->esb_stat |= ESB_ST_COMPLETE;

        if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
                ep->state |= FC_EX_DONE;
                if (cancel_delayed_work(&ep->timeout_work))
                        atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
                rc = 0;
        }
        return rc;
}
static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
{
        struct fc_exch_mgr *mp;

        mp = ep->em;
        spin_lock_bh(&mp->em_lock);
        WARN_ON(mp->total_exches <= 0);
        mp->total_exches--;
        mp->exches[ep->xid - mp->min_xid] = NULL;
        list_del(&ep->ex_list);
        spin_unlock_bh(&mp->em_lock);
        fc_exch_release(ep);    /* drop hold for exch in mp */
}
/*
 * Internal version of fc_exch_timer_set - used with lock held.
 */
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
                                            unsigned int timer_msec)
{
        if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
                return;

        FC_EXCH_DBG(ep, "Exchange timer armed\n");

        if (schedule_delayed_work(&ep->timeout_work,
                                  msecs_to_jiffies(timer_msec)))
                fc_exch_hold(ep);       /* hold for timer */
}
/*
 * Set timer for an exchange.
 * The time is a minimum delay in milliseconds until the timer fires.
 * Used for upper level protocols to time out the exchange.
 * The timer is cancelled when it fires or when the exchange completes.
 */
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
{
        spin_lock_bh(&ep->ex_lock);
        fc_exch_timer_set_locked(ep, timer_msec);
        spin_unlock_bh(&ep->ex_lock);
}
int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
        struct fc_seq *sp;
        struct fc_exch *ep;
        struct fc_frame *fp;
        int error;

        ep = fc_seq_exch(req_sp);

        spin_lock_bh(&ep->ex_lock);
        if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
            ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
                spin_unlock_bh(&ep->ex_lock);
                return -ENXIO;
        }

        /*
         * Send the abort on a new sequence if possible.
         */
        sp = fc_seq_start_next_locked(&ep->seq);
        if (!sp) {
                spin_unlock_bh(&ep->ex_lock);
                return -ENOMEM;
        }

        ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
        if (timer_msec)
                fc_exch_timer_set_locked(ep, timer_msec);
        spin_unlock_bh(&ep->ex_lock);

        /*
         * If not logged into the fabric, don't send ABTS but leave
         * the sequence active until the next timeout.
         */
        if (!ep->sid)
                return 0;

        /*
         * Send an abort for the sequence that timed out.
         */
        fp = fc_frame_alloc(ep->lp, 0);
        if (fp) {
                fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
                               FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
                error = fc_seq_send(ep->lp, sp, fp);
        } else
                error = -ENOBUFS;
        return error;
}
EXPORT_SYMBOL(fc_seq_exch_abort);
/*
 * Exchange timeout - handle exchange timer expiration.
 * The timer will have been cancelled before this is called.
 */
static void fc_exch_timeout(struct work_struct *work)
{
        struct fc_exch *ep = container_of(work, struct fc_exch,
                                          timeout_work.work);
        struct fc_seq *sp = &ep->seq;
        void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
        void *arg;
        u32 e_stat;
        int rc = 1;

        spin_lock_bh(&ep->ex_lock);
        if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
                goto unlock;

        e_stat = ep->esb_stat;
        if (e_stat & ESB_ST_COMPLETE) {
                ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
                if (e_stat & ESB_ST_REC_QUAL)
                        fc_exch_rrq(ep);
                spin_unlock_bh(&ep->ex_lock);
                goto done;
        } else {
                resp = ep->resp;
                ep->resp = NULL;
                arg = ep->arg;
                if (e_stat & ESB_ST_ABNORMAL)
                        rc = fc_exch_done_locked(ep);
                spin_unlock_bh(&ep->ex_lock);
                if (!rc)
                        fc_exch_mgr_delete_ep(ep);
                if (resp)
                        resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
                fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
                goto done;
        }
unlock:
        spin_unlock_bh(&ep->ex_lock);
done:
        /*
         * This release matches the hold taken when the timer was set.
         */
        fc_exch_release(ep);
}
/*
 * Allocate a sequence.
 *
 * We don't support multiple originated sequences on the same exchange.
 * By implication, any previously originated sequence on this exchange
 * is complete, and we reallocate the same sequence.
 */
static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
{
        struct fc_seq *sp;

        sp = &ep->seq;
        sp->ssb_stat = 0;
        sp->cnt = 0;
        sp->id = seq_id;
        return sp;
}
/**
 * fc_em_alloc_xid - returns an xid based on request type
 * @mp : ptr to the associated exchange manager
 * @fp : ptr to the associated frame
 *
 * Check the associated fc_fsp_pkt to get the SCSI command type and
 * command direction to decide from which range this exchange ID
 * will be allocated.
 *
 * Returns: 0 or a valid xid
 */
static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
{
        u16 xid, min, max;
        u16 *plast;
        struct fc_exch *ep = NULL;

        if (mp->max_read) {
                if (fc_fcp_is_read(fr_fsp(fp))) {
                        min = mp->min_xid;
                        max = mp->max_read;
                        plast = &mp->last_read;
                } else {
                        min = mp->max_read + 1;
                        max = mp->max_xid;
                        plast = &mp->last_xid;
                }
        } else {
                min = mp->min_xid;
                max = mp->max_xid;
                plast = &mp->last_xid;
        }

        xid = *plast;
        do {
                xid = (xid == max) ? min : xid + 1;
                ep = mp->exches[xid - mp->min_xid];
        } while ((ep != NULL) && (xid != *plast));

        if (ep)
                xid = 0;
        else
                *plast = xid;

        return xid;
}
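/*
 * Illustrative example (values assumed, not from the original source):
 * with min_xid = 1, max_xid = 4095 and max_read = 2047, a read I/O gets its
 * xid from [1, 2047] tracked by last_read, while any other request gets one
 * from [2048, 4095] tracked by last_xid; the do/while walk above simply
 * searches circularly from the last allocation for a free slot.
 */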
/**
 * fc_exch_alloc - allocate an exchange.
 * @mp : ptr to the exchange manager
 * @fp : ptr to the associated frame
 * @xid: input xid
 *
 * If the supplied xid is zero, assign the next free exchange ID
 * from the exchange manager; otherwise use the supplied xid.
 * Returns with the exch lock held.
 */
struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
                              struct fc_frame *fp, u16 xid)
{
        struct fc_exch *ep;

        /* allocate memory for exchange */
        ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
        if (!ep) {
                atomic_inc(&mp->stats.no_free_exch);
                goto out;
        }
        memset(ep, 0, sizeof(*ep));

        spin_lock_bh(&mp->em_lock);
        /* alloc xid if input xid 0 */
        if (!xid) {
                /* alloc a new xid */
                xid = fc_em_alloc_xid(mp, fp);
                if (!xid) {
                        printk(KERN_WARNING "libfc: Failed to allocate an exchange\n");
                        goto err;
                }
        }

        fc_exch_hold(ep);       /* hold for exch in mp */
        spin_lock_init(&ep->ex_lock);
        /*
         * Hold exch lock for caller to prevent fc_exch_reset()
         * from releasing exch while fc_exch_alloc() caller is
         * still working on exch.
         */
        spin_lock_bh(&ep->ex_lock);

        mp->exches[xid - mp->min_xid] = ep;
        list_add_tail(&ep->ex_list, &mp->ex_list);
        fc_seq_alloc(ep, ep->seq_id++);
        mp->total_exches++;
        spin_unlock_bh(&mp->em_lock);

        /*
         * update exchange
         */
        ep->oxid = ep->xid = xid;
        ep->em = mp;
        ep->lp = mp->lp;
        ep->f_ctl = FC_FC_FIRST_SEQ;    /* next seq is first seq */
        ep->rxid = FC_XID_UNKNOWN;
        ep->class = mp->class;
        INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
        return ep;
err:
        spin_unlock_bh(&mp->em_lock);
        atomic_inc(&mp->stats.no_free_exch_xid);
        mempool_free(ep, mp->ep_pool);
        return NULL;
}
EXPORT_SYMBOL(fc_exch_alloc);
/*
 * Lookup and hold an exchange.
 */
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
        struct fc_exch *ep = NULL;

        if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
                spin_lock_bh(&mp->em_lock);
                ep = mp->exches[xid - mp->min_xid];
                if (ep) {
                        fc_exch_hold(ep);
                        WARN_ON(ep->xid != xid);
                }
                spin_unlock_bh(&mp->em_lock);
        }
        return ep;
}
void fc_exch_done(struct fc_seq *sp)
{
        struct fc_exch *ep = fc_seq_exch(sp);
        int rc;

        spin_lock_bh(&ep->ex_lock);
        rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
        if (!rc)
                fc_exch_mgr_delete_ep(ep);
}
EXPORT_SYMBOL(fc_exch_done);
/*
 * Allocate a new exchange as responder.
 * Sets the responder ID in the frame header.
 */
static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
        struct fc_exch *ep;
        struct fc_frame_header *fh;

        ep = mp->lp->tt.exch_get(mp->lp, fp);
        if (ep) {
                ep->class = fc_frame_class(fp);

                /*
                 * Set EX_CTX indicating we're responding on this exchange.
                 */
                ep->f_ctl |= FC_FC_EX_CTX;      /* we're responding */
                ep->f_ctl &= ~FC_FC_FIRST_SEQ;  /* not new */
                fh = fc_frame_header_get(fp);
                ep->sid = ntoh24(fh->fh_d_id);
                ep->did = ntoh24(fh->fh_s_id);
                ep->oid = ep->did;

                /*
                 * Allocated exchange has placed the XID in the
                 * originator field. Move it to the responder field,
                 * and set the originator XID from the frame.
                 */
                ep->rxid = ep->xid;
                ep->oxid = ntohs(fh->fh_ox_id);
                ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
                if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
                        ep->esb_stat &= ~ESB_ST_SEQ_INIT;

                fc_exch_hold(ep);       /* hold for caller */
                spin_unlock_bh(&ep->ex_lock);   /* lock from exch_get */
        }
        return ep;
}
/*
 * Find a sequence for receive where the other end is originating the sequence.
 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
 * on the ep that should be released by the caller.
 */
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp,
                                                 struct fc_frame *fp)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        struct fc_exch *ep = NULL;
        struct fc_seq *sp = NULL;
        enum fc_pf_rjt_reason reject = FC_RJT_NONE;
        u32 f_ctl;
        u16 xid;

        f_ctl = ntoh24(fh->fh_f_ctl);
        WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);

        /*
         * Lookup or create the exchange if we will be creating the sequence.
         */
        if (f_ctl & FC_FC_EX_CTX) {
                xid = ntohs(fh->fh_ox_id);      /* we originated exch */
                ep = fc_exch_find(mp, xid);
                if (!ep) {
                        atomic_inc(&mp->stats.xid_not_found);
                        reject = FC_RJT_OX_ID;
                        goto out;
                }
                if (ep->rxid == FC_XID_UNKNOWN)
                        ep->rxid = ntohs(fh->fh_rx_id);
                else if (ep->rxid != ntohs(fh->fh_rx_id)) {
                        reject = FC_RJT_OX_ID;
                        goto rel;
                }
        } else {
                xid = ntohs(fh->fh_rx_id);      /* we are the responder */

                /*
                 * Special case for MDS issuing an ELS TEST with a
                 * bad rxid of 0.
                 * XXX take this out once we do the proper reject.
                 */
                if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
                    fc_frame_payload_op(fp) == ELS_TEST) {
                        fh->fh_rx_id = htons(FC_XID_UNKNOWN);
                        xid = FC_XID_UNKNOWN;
                }

                /*
                 * new sequence - find the exchange
                 */
                ep = fc_exch_find(mp, xid);
                if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
                        if (ep) {
                                atomic_inc(&mp->stats.xid_busy);
                                reject = FC_RJT_RX_ID;
                                goto rel;
                        }
                        ep = fc_exch_resp(mp, fp);
                        if (!ep) {
                                reject = FC_RJT_EXCH_EST;       /* XXX */
                                goto out;
                        }
                        xid = ep->xid;  /* get our XID */
                } else if (!ep) {
                        atomic_inc(&mp->stats.xid_not_found);
                        reject = FC_RJT_RX_ID;  /* XID not found */
                        goto out;
                }
        }

        /*
         * At this point, we have the exchange held.
         * Find or create the sequence.
         */
        if (fc_sof_is_init(fr_sof(fp))) {
                sp = fc_seq_start_next(&ep->seq);
                if (!sp) {
                        reject = FC_RJT_SEQ_XS; /* exchange shortage */
                        goto rel;
                }
                sp->id = fh->fh_seq_id;
                sp->ssb_stat |= SSB_ST_RESP;
        } else {
                sp = &ep->seq;
                if (sp->id != fh->fh_seq_id) {
                        atomic_inc(&mp->stats.seq_not_found);
                        reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
                        goto rel;
                }
        }
        WARN_ON(ep != fc_seq_exch(sp));

        if (f_ctl & FC_FC_SEQ_INIT)
                ep->esb_stat |= ESB_ST_SEQ_INIT;

        fr_seq(fp) = sp;
out:
        return reject;
rel:
        fc_exch_done(&ep->seq);
        fc_exch_release(ep);    /* hold from fc_exch_find/fc_exch_resp */
        return reject;
}
/*
 * Find the sequence for a frame being received.
 * We originated the sequence, so it should be found.
 * We may or may not have originated the exchange.
 * Does not hold the sequence for the caller.
 */
static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
                                         struct fc_frame *fp)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        struct fc_exch *ep;
        struct fc_seq *sp = NULL;
        u32 f_ctl;
        u16 xid;

        f_ctl = ntoh24(fh->fh_f_ctl);
        WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
        xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
        ep = fc_exch_find(mp, xid);
        if (!ep)
                return NULL;
        if (ep->seq.id == fh->fh_seq_id) {
                /*
                 * Save the RX_ID if we didn't previously know it.
                 */
                sp = &ep->seq;
                if ((f_ctl & FC_FC_EX_CTX) != 0 &&
                    ep->rxid == FC_XID_UNKNOWN) {
                        ep->rxid = ntohs(fh->fh_rx_id);
                }
        }
        fc_exch_release(ep);
        return sp;
}
/*
 * Set addresses for an exchange.
 * Note this must be done before the first sequence of the exchange is sent.
 */
static void fc_exch_set_addr(struct fc_exch *ep,
                             u32 orig_id, u32 resp_id)
{
        ep->oid = orig_id;
        if (ep->esb_stat & ESB_ST_RESP) {
                ep->sid = resp_id;
                ep->did = orig_id;
        } else {
                ep->sid = orig_id;
                ep->did = resp_id;
        }
}
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
{
        struct fc_exch *ep = fc_seq_exch(sp);

        sp = fc_seq_alloc(ep, ep->seq_id++);
        FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
                    ep->f_ctl, sp->id);
        return sp;
}
/*
 * Allocate a new sequence on the same exchange as the supplied sequence.
 * This will never return NULL.
 */
struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
        struct fc_exch *ep = fc_seq_exch(sp);

        spin_lock_bh(&ep->ex_lock);
        WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
        sp = fc_seq_start_next_locked(sp);
        spin_unlock_bh(&ep->ex_lock);

        return sp;
}
EXPORT_SYMBOL(fc_seq_start_next);
int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
{
        struct fc_exch *ep;
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        int error;
        u32 f_ctl;

        ep = fc_seq_exch(sp);
        WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);

        f_ctl = ntoh24(fh->fh_f_ctl);
        fc_exch_setup_hdr(ep, fp, f_ctl);

        /*
         * Update the sequence count if this frame is carrying
         * multiple FC frames when sequence offload is enabled
         * by the LLD.
         */
        if (fr_max_payload(fp))
                sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
                                        fr_max_payload(fp));
        else
                sp->cnt++;

        /*
         * Send the frame.
         */
        error = lp->tt.frame_send(lp, fp);

        /*
         * Update the exchange and sequence flags,
         * assuming all frames for the sequence have been sent.
         * We can only be called to send once for each sequence.
         */
        spin_lock_bh(&ep->ex_lock);
        ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;   /* not first seq */
        if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
                ep->esb_stat &= ~ESB_ST_SEQ_INIT;
        spin_unlock_bh(&ep->ex_lock);
        return error;
}
EXPORT_SYMBOL(fc_seq_send);
void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
                         struct fc_seq_els_data *els_data)
{
        switch (els_cmd) {
        case ELS_LS_RJT:
                fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
                break;
        case ELS_LS_ACC:
                fc_seq_ls_acc(sp);
                break;
        case ELS_RRQ:
                fc_exch_els_rrq(sp, els_data->fp);
                break;
        case ELS_REC:
                fc_exch_els_rec(sp, els_data->fp);
                break;
        default:
                FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
        }
}
EXPORT_SYMBOL(fc_seq_els_rsp_send);
/*
 * Send a sequence, which is also the last sequence in the exchange.
 */
static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
                             enum fc_rctl rctl, enum fc_fh_type fh_type)
{
        u32 f_ctl;
        struct fc_exch *ep = fc_seq_exch(sp);

        f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
        f_ctl |= ep->f_ctl;
        fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
        fc_seq_send(ep->lp, sp, fp);
}
/*
 * Send ACK_1 (or equiv.) indicating we received something.
 * The frame we're acking is supplied.
 */
static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
{
        struct fc_frame *fp;
        struct fc_frame_header *rx_fh;
        struct fc_frame_header *fh;
        struct fc_exch *ep = fc_seq_exch(sp);
        struct fc_lport *lp = ep->lp;
        unsigned int f_ctl;

        /*
         * Don't send ACKs for class 3.
         */
        if (fc_sof_needs_ack(fr_sof(rx_fp))) {
                fp = fc_frame_alloc(lp, 0);
                if (!fp)
                        return;

                fh = fc_frame_header_get(fp);
                fh->fh_r_ctl = FC_RCTL_ACK_1;
                fh->fh_type = FC_TYPE_BLS;

                /*
                 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
                 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
                 * Bits 9-8 are meaningful (retransmitted or unidirectional).
                 * Last ACK uses bits 7-6 (continue sequence),
                 * bits 5-4 are meaningful (what kind of ACK to use).
                 */
                rx_fh = fc_frame_header_get(rx_fp);
                f_ctl = ntoh24(rx_fh->fh_f_ctl);
                f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
                        FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
                        FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
                        FC_FC_RETX_SEQ | FC_FC_UNI_TX;
                f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
                hton24(fh->fh_f_ctl, f_ctl);

                fc_exch_setup_hdr(ep, fp, f_ctl);
                fh->fh_seq_id = rx_fh->fh_seq_id;
                fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
                fh->fh_parm_offset = htonl(1);  /* ack single frame */

                fr_sof(fp) = fr_sof(rx_fp);
                if (f_ctl & FC_FC_END_SEQ)
                        fr_eof(fp) = FC_EOF_T;
                else
                        fr_eof(fp) = FC_EOF_N;

                (void) lp->tt.frame_send(lp, fp);
        }
}
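/*
 * Worked example of the f_ctl manipulation above (illustrative only, not
 * from the original source): if the received frame's F_CTL has EX_CTX = 0
 * and SEQ_CTX = 0 (sent by the exchange and sequence originator), the XOR
 * flips both bits so the ACK carries EX_CTX = 1 and SEQ_CTX = 1, i.e. it is
 * identified as coming from the responder side.
 */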
/*
 * Send a BLS reject.
 * This is for rejecting BA_ABTS only.
 */
static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
                                enum fc_ba_rjt_reason reason,
                                enum fc_ba_rjt_explan explan)
{
        struct fc_frame *fp;
        struct fc_frame_header *rx_fh;
        struct fc_frame_header *fh;
        struct fc_ba_rjt *rp;
        struct fc_lport *lp;
        unsigned int f_ctl;

        lp = fr_dev(rx_fp);
        fp = fc_frame_alloc(lp, sizeof(*rp));
        if (!fp)
                return;
        fh = fc_frame_header_get(fp);
        rx_fh = fc_frame_header_get(rx_fp);

        memset(fh, 0, sizeof(*fh) + sizeof(*rp));

        rp = fc_frame_payload_get(fp, sizeof(*rp));
        rp->br_reason = reason;
        rp->br_explan = explan;

        /*
         * seq_id, cs_ctl, df_ctl and param/offset are zero.
         */
        memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
        memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
        fh->fh_ox_id = rx_fh->fh_rx_id;
        fh->fh_rx_id = rx_fh->fh_ox_id;
        fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
        fh->fh_r_ctl = FC_RCTL_BA_RJT;
        fh->fh_type = FC_TYPE_BLS;

        /*
         * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
         * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
         * Bits 9-8 are meaningful (retransmitted or unidirectional).
         * Last ACK uses bits 7-6 (continue sequence),
         * bits 5-4 are meaningful (what kind of ACK to use).
         * Always set LAST_SEQ, END_SEQ.
         */
        f_ctl = ntoh24(rx_fh->fh_f_ctl);
        f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
                FC_FC_END_CONN | FC_FC_SEQ_INIT |
                FC_FC_RETX_SEQ | FC_FC_UNI_TX;
        f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
        f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
        f_ctl &= ~FC_FC_FIRST_SEQ;
        hton24(fh->fh_f_ctl, f_ctl);

        fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
        fr_eof(fp) = FC_EOF_T;
        if (fc_sof_needs_ack(fr_sof(fp)))
                fr_eof(fp) = FC_EOF_N;

        (void) lp->tt.frame_send(lp, fp);
}
/*
 * Handle an incoming ABTS.  This would be for target mode usually,
 * but could be due to lost FCP transfer ready, confirm or RRQ.
 * We always handle this as an exchange abort, ignoring the parameter.
 */
static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
{
        struct fc_frame *fp;
        struct fc_ba_acc *ap;
        struct fc_frame_header *fh;
        struct fc_seq *sp;

        if (!ep)
                goto reject;
        spin_lock_bh(&ep->ex_lock);
        if (ep->esb_stat & ESB_ST_COMPLETE) {
                spin_unlock_bh(&ep->ex_lock);
                goto reject;
        }
        if (!(ep->esb_stat & ESB_ST_REC_QUAL))
                fc_exch_hold(ep);               /* hold for REC_QUAL */
        ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
        fc_exch_timer_set_locked(ep, ep->r_a_tov);

        fp = fc_frame_alloc(ep->lp, sizeof(*ap));
        if (!fp) {
                spin_unlock_bh(&ep->ex_lock);
                goto free;
        }
        fh = fc_frame_header_get(fp);
        ap = fc_frame_payload_get(fp, sizeof(*ap));
        memset(ap, 0, sizeof(*ap));
        sp = &ep->seq;
        ap->ba_high_seq_cnt = htons(0xffff);
        if (sp->ssb_stat & SSB_ST_RESP) {
                ap->ba_seq_id = sp->id;
                ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
                ap->ba_high_seq_cnt = fh->fh_seq_cnt;
                ap->ba_low_seq_cnt = htons(sp->cnt);
        }
        sp = fc_seq_start_next_locked(sp);
        spin_unlock_bh(&ep->ex_lock);
        fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
        fc_frame_free(rx_fp);
        return;

reject:
        fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
free:
        fc_frame_free(rx_fp);
}
/*
 * Handle receive where the other end is originating the sequence.
 */
static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
                             struct fc_frame *fp)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        struct fc_seq *sp = NULL;
        struct fc_exch *ep = NULL;
        u32 f_ctl;
        enum fc_pf_rjt_reason reject;

        reject = fc_seq_lookup_recip(mp, fp);
        if (reject == FC_RJT_NONE) {
                sp = fr_seq(fp);        /* sequence will be held */
                ep = fc_seq_exch(sp);
                f_ctl = ntoh24(fh->fh_f_ctl);
                fc_seq_send_ack(sp, fp);

                /*
                 * Call the receive function.
                 *
                 * The receive function may allocate a new sequence
                 * over the old one, so we shouldn't change the
                 * sequence after this.
                 *
                 * The frame will be freed by the receive function.
                 * If the new exch resp handler is valid then call that
                 * first.
                 */
                if (ep->resp)
                        ep->resp(sp, fp, ep->arg);
                else
                        lp->tt.lport_recv(lp, sp, fp);
                fc_exch_release(ep);    /* release from lookup */
        } else {
                FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject);
                fc_frame_free(fp);
        }
}
/*
 * Handle receive where the other end is originating the sequence in
 * response to our exchange.
 */
static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        struct fc_seq *sp;
        struct fc_exch *ep;
        enum fc_sof_t sof;
        u32 f_ctl;
        void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
        void *ex_resp_arg;
        int rc;

        ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
        if (!ep) {
                atomic_inc(&mp->stats.xid_not_found);
                goto out;
        }
        if (ep->esb_stat & ESB_ST_COMPLETE) {
                atomic_inc(&mp->stats.xid_not_found);
                goto out;
        }
        if (ep->rxid == FC_XID_UNKNOWN)
                ep->rxid = ntohs(fh->fh_rx_id);
        if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
                atomic_inc(&mp->stats.xid_not_found);
                goto rel;
        }
        if (ep->did != ntoh24(fh->fh_s_id) &&
            ep->did != FC_FID_FLOGI) {
                atomic_inc(&mp->stats.xid_not_found);
                goto rel;
        }
        sof = fr_sof(fp);
        if (fc_sof_is_init(sof)) {
                sp = fc_seq_start_next(&ep->seq);
                sp->id = fh->fh_seq_id;
                sp->ssb_stat |= SSB_ST_RESP;
        } else {
                sp = &ep->seq;
                if (sp->id != fh->fh_seq_id) {
                        atomic_inc(&mp->stats.seq_not_found);
                        goto rel;
                }
        }
        f_ctl = ntoh24(fh->fh_f_ctl);
        fr_seq(fp) = sp;
        if (f_ctl & FC_FC_SEQ_INIT)
                ep->esb_stat |= ESB_ST_SEQ_INIT;

        if (fc_sof_needs_ack(sof))
                fc_seq_send_ack(sp, fp);
        resp = ep->resp;
        ex_resp_arg = ep->arg;

        if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
            (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
            (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
                spin_lock_bh(&ep->ex_lock);
                rc = fc_exch_done_locked(ep);
                WARN_ON(fc_seq_exch(sp) != ep);
                spin_unlock_bh(&ep->ex_lock);
                if (!rc)
                        fc_exch_mgr_delete_ep(ep);
        }

        /*
         * Call the receive function.
         * The sequence is held (has a refcnt) for us,
         * but not for the receive function.
         *
         * The receive function may allocate a new sequence
         * over the old one, so we shouldn't change the
         * sequence after this.
         *
         * The frame will be freed by the receive function.
         * If the new exch resp handler is valid then call that
         * first.
         */
        if (resp)
                resp(sp, fp, ex_resp_arg);
        else
                fc_frame_free(fp);
        fc_exch_release(ep);
        return;
rel:
        fc_exch_release(ep);
out:
        fc_frame_free(fp);
}
/*
 * Handle receive for a sequence where the other end is responding to our sequence.
 */
static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
        struct fc_seq *sp;

        sp = fc_seq_lookup_orig(mp, fp);        /* doesn't hold sequence */
        if (!sp) {
                atomic_inc(&mp->stats.xid_not_found);
                FC_EM_DBG(mp, "seq lookup failed\n");
        } else {
                atomic_inc(&mp->stats.non_bls_resp);
                FC_EM_DBG(mp, "non-BLS response to sequence");
        }
        fc_frame_free(fp);
}
/*
 * Handle the response to an ABTS for an exchange or sequence.
 * This can be BA_ACC or BA_RJT.
 */
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{
        void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
        void *ex_resp_arg;
        struct fc_frame_header *fh;
        struct fc_ba_acc *ap;
        struct fc_seq *sp;
        u16 low;
        u16 high;
        int rc = 1, has_rec = 0;

        fh = fc_frame_header_get(fp);
        FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
                    fc_exch_rctl_name(fh->fh_r_ctl));

        if (cancel_delayed_work_sync(&ep->timeout_work))
                fc_exch_release(ep);    /* release from pending timer hold */

        spin_lock_bh(&ep->ex_lock);
        switch (fh->fh_r_ctl) {
        case FC_RCTL_BA_ACC:
                ap = fc_frame_payload_get(fp, sizeof(*ap));
                if (!ap)
                        break;

                /*
                 * Decide whether to establish a Recovery Qualifier.
                 * We do this if there is a non-empty SEQ_CNT range and
                 * SEQ_ID is the same as the one we aborted.
                 */
                low = ntohs(ap->ba_low_seq_cnt);
                high = ntohs(ap->ba_high_seq_cnt);
                if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
                    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
                     ap->ba_seq_id == ep->seq_id) && low != high) {
                        ep->esb_stat |= ESB_ST_REC_QUAL;
                        fc_exch_hold(ep);       /* hold for recovery qualifier */
                        has_rec = 1;
                }
                break;
        case FC_RCTL_BA_RJT:
        default:
                break;
        }

        resp = ep->resp;
        ex_resp_arg = ep->arg;

        /*
         * Do we need to do some other checks here? Can we reuse more of
         * fc_exch_recv_seq_resp()?
         */

        /*
         * do we want to check END_SEQ as well as LAST_SEQ here?
         */
        sp = &ep->seq;
        if (ep->fh_type != FC_TYPE_FCP &&
            ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
                rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
        if (!rc)
                fc_exch_mgr_delete_ep(ep);

        if (resp)
                resp(sp, fp, ex_resp_arg);
        else
                fc_frame_free(fp);

        if (has_rec)
                fc_exch_timer_set(ep, ep->r_a_tov);
}
/*
 * Receive a BLS sequence.
 * This is always a sequence initiated by the remote side.
 * We may be either the originator or recipient of the exchange.
 */
static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
        struct fc_frame_header *fh;
        struct fc_exch *ep;
        u32 f_ctl;

        fh = fc_frame_header_get(fp);
        f_ctl = ntoh24(fh->fh_f_ctl);

        ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
                          ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
        if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
                spin_lock_bh(&ep->ex_lock);
                ep->esb_stat |= ESB_ST_SEQ_INIT;
                spin_unlock_bh(&ep->ex_lock);
        }
        if (f_ctl & FC_FC_SEQ_CTX) {
                /*
                 * A response to a sequence we initiated.
                 * This should only be ACKs for class 2 or F.
                 */
                switch (fh->fh_r_ctl) {
                case FC_RCTL_ACK_1:
                case FC_RCTL_ACK_0:
                        break;
                default:
                        FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
                                    fh->fh_r_ctl,
                                    fc_exch_rctl_name(fh->fh_r_ctl));
                        break;
                }
                fc_frame_free(fp);
        } else {
                switch (fh->fh_r_ctl) {
                case FC_RCTL_BA_RJT:
                case FC_RCTL_BA_ACC:
                        if (ep)
                                fc_exch_abts_resp(ep, fp);
                        else
                                fc_frame_free(fp);
                        break;
                case FC_RCTL_BA_ABTS:
                        fc_exch_recv_abts(ep, fp);
                        break;
                default:                        /* ignore junk */
                        fc_frame_free(fp);
                        break;
                }
        }
        if (ep)
                fc_exch_release(ep);    /* release hold taken by fc_exch_find */
}
/*
 * Accept sequence with LS_ACC.
 * If this fails due to allocation or transmit congestion, assume the
 * originator will repeat the sequence.
 */
static void fc_seq_ls_acc(struct fc_seq *req_sp)
{
        struct fc_seq *sp;
        struct fc_els_ls_acc *acc;
        struct fc_frame *fp;

        sp = fc_seq_start_next(req_sp);
        fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
        if (fp) {
                acc = fc_frame_payload_get(fp, sizeof(*acc));
                memset(acc, 0, sizeof(*acc));
                acc->la_cmd = ELS_LS_ACC;
                fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
        }
}
/*
 * Reject sequence with ELS LS_RJT.
 * If this fails due to allocation or transmit congestion, assume the
 * originator will repeat the sequence.
 */
static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
                          enum fc_els_rjt_explan explan)
{
        struct fc_seq *sp;
        struct fc_els_ls_rjt *rjt;
        struct fc_frame *fp;

        sp = fc_seq_start_next(req_sp);
        fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
        if (fp) {
                rjt = fc_frame_payload_get(fp, sizeof(*rjt));
                memset(rjt, 0, sizeof(*rjt));
                rjt->er_cmd = ELS_LS_RJT;
                rjt->er_reason = reason;
                rjt->er_explan = explan;
                fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
        }
}
static void fc_exch_reset(struct fc_exch *ep)
{
        struct fc_seq *sp;
        void (*resp)(struct fc_seq *, struct fc_frame *, void *);
        void *arg;
        int rc = 1;

        spin_lock_bh(&ep->ex_lock);
        ep->state |= FC_EX_RST_CLEANUP;
        /*
         * We really want to call del_timer_sync, but cannot due
         * to the lport calling with the lport lock held (some resp
         * functions can also grab the lport lock which could cause
         * a deadlock).
         */
        if (cancel_delayed_work(&ep->timeout_work))
                atomic_dec(&ep->ex_refcnt);     /* drop hold for timer */
        resp = ep->resp;
        ep->resp = NULL;
        if (ep->esb_stat & ESB_ST_REC_QUAL)
                atomic_dec(&ep->ex_refcnt);     /* drop hold for rec_qual */
        ep->esb_stat &= ~ESB_ST_REC_QUAL;
        arg = ep->arg;
        sp = &ep->seq;
        rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
        if (!rc)
                fc_exch_mgr_delete_ep(ep);

        if (resp)
                resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
}
/*
 * Reset an exchange manager, releasing all sequences and exchanges.
 * If sid is non-zero, reset only exchanges we source from that FID.
 * If did is non-zero, reset only exchanges destined to that FID.
 */
void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
{
        struct fc_exch *ep;
        struct fc_exch *next;
        struct fc_exch_mgr *mp = lp->emp;

        spin_lock_bh(&mp->em_lock);
restart:
        list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
                if ((sid == 0 || sid == ep->sid) &&
                    (did == 0 || did == ep->did)) {
                        fc_exch_hold(ep);
                        spin_unlock_bh(&mp->em_lock);

                        fc_exch_reset(ep);

                        fc_exch_release(ep);
                        spin_lock_bh(&mp->em_lock);

                        /*
                         * Must restart the loop in case, while the lock was
                         * down, multiple eps were released.
                         */
                        goto restart;
                }
        }
        spin_unlock_bh(&mp->em_lock);
}
EXPORT_SYMBOL(fc_exch_mgr_reset);
/*
 * Handle incoming ELS REC - Read Exchange Concise.
 * Note that the requesting port may be different than the S_ID in the request.
 */
static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
{
        struct fc_frame *fp;
        struct fc_exch *ep;
        struct fc_exch_mgr *em;
        struct fc_els_rec *rp;
        struct fc_els_rec_acc *acc;
        enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
        enum fc_els_rjt_explan explan;
        u32 sid;
        u16 rxid;
        u16 oxid;

        rp = fc_frame_payload_get(rfp, sizeof(*rp));
        explan = ELS_EXPL_INV_LEN;
        if (!rp)
                goto reject;
        sid = ntoh24(rp->rec_s_id);
        rxid = ntohs(rp->rec_rx_id);
        oxid = ntohs(rp->rec_ox_id);

        /*
         * Currently it's hard to find the local S_ID from the exchange
         * manager. This will eventually be fixed, but for now it's easier
         * to lookup the subject exchange twice, once as if we were
         * the initiator, and then again if we weren't.
         */
        em = fc_seq_exch(sp)->em;
        ep = fc_exch_find(em, oxid);
        explan = ELS_EXPL_OXID_RXID;
        if (ep && ep->oid == sid) {
                if (ep->rxid != FC_XID_UNKNOWN &&
                    rxid != FC_XID_UNKNOWN &&
                    ep->rxid != rxid)
                        goto rel;
        } else {
                if (ep)
                        fc_exch_release(ep);
                ep = NULL;
                if (rxid != FC_XID_UNKNOWN)
                        ep = fc_exch_find(em, rxid);
                if (!ep)
                        goto reject;
        }

        fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
        if (!fp) {
                fc_exch_done(sp);
                goto out;
        }
        sp = fc_seq_start_next(sp);
        acc = fc_frame_payload_get(fp, sizeof(*acc));
        memset(acc, 0, sizeof(*acc));
        acc->reca_cmd = ELS_LS_ACC;
        acc->reca_ox_id = rp->rec_ox_id;
        memcpy(acc->reca_ofid, rp->rec_s_id, 3);
        acc->reca_rx_id = htons(ep->rxid);
        if (ep->sid == ep->oid)
                hton24(acc->reca_rfid, ep->did);
        else
                hton24(acc->reca_rfid, ep->sid);
        acc->reca_fc4value = htonl(ep->seq.rec_data);
        acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
                                                 ESB_ST_SEQ_INIT |
                                                 ESB_ST_COMPLETE));
        sp = fc_seq_start_next(sp);
        fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
out:
        fc_exch_release(ep);
        fc_frame_free(rfp);
        return;

rel:
        fc_exch_release(ep);
reject:
        fc_seq_ls_rjt(sp, reason, explan);
        fc_frame_free(rfp);
}
/*
 * Handle the response from RRQ.
 * Not much to do here, really.
 * Should report errors.
 *
 * TODO: fix error handler.
 */
static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
        struct fc_exch *aborted_ep = arg;
        unsigned int op;

        if (IS_ERR(fp)) {
                int err = PTR_ERR(fp);

                if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
                        goto cleanup;
                FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
                            "frame error %d\n", err);
                return;
        }

        op = fc_frame_payload_op(fp);
        fc_frame_free(fp);

        switch (op) {
        case ELS_LS_RJT:
                FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
                /* fall through */
        case ELS_LS_ACC:
                goto cleanup;
        default:
                FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
                            "for RRQ", op);
                return;
        }

cleanup:
        fc_exch_done(&aborted_ep->seq);
        /* drop hold for rec qual */
        fc_exch_release(aborted_ep);
}
/*
 * Send ELS RRQ - Reinstate Recovery Qualifier.
 * This tells the remote port to stop blocking the use of
 * the exchange and the seq_cnt range.
 */
static void fc_exch_rrq(struct fc_exch *ep)
{
        struct fc_lport *lp;
        struct fc_els_rrq *rrq;
        struct fc_frame *fp;
        struct fc_seq *rrq_sp;
        u32 did;

        lp = ep->lp;

        fp = fc_frame_alloc(lp, sizeof(*rrq));
        if (!fp)
                return;
        rrq = fc_frame_payload_get(fp, sizeof(*rrq));
        memset(rrq, 0, sizeof(*rrq));
        rrq->rrq_cmd = ELS_RRQ;
        hton24(rrq->rrq_s_id, ep->sid);
        rrq->rrq_ox_id = htons(ep->oxid);
        rrq->rrq_rx_id = htons(ep->rxid);

        did = ep->did;
        if (ep->esb_stat & ESB_ST_RESP)
                did = ep->sid;

        fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
                       fc_host_port_id(lp->host), FC_TYPE_ELS,
                       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

        rrq_sp = fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep,
                                  lp->e_d_tov);
        if (!rrq_sp) {
                ep->esb_stat |= ESB_ST_REC_QUAL;
                fc_exch_timer_set_locked(ep, ep->r_a_tov);
                return;
        }
}
/*
 * Handle incoming ELS RRQ - Reset Recovery Qualifier.
 */
static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
{
        struct fc_exch *ep;             /* request or subject exchange */
        struct fc_els_rrq *rp;
        u32 sid;
        u16 xid;
        enum fc_els_rjt_explan explan;

        rp = fc_frame_payload_get(fp, sizeof(*rp));
        explan = ELS_EXPL_INV_LEN;
        if (!rp)
                goto reject;

        /*
         * lookup subject exchange.
         */
        ep = fc_seq_exch(sp);
        sid = ntoh24(rp->rrq_s_id);             /* subject source */
        xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
        ep = fc_exch_find(ep->em, xid);

        explan = ELS_EXPL_OXID_RXID;
        if (!ep)
                goto reject;
        spin_lock_bh(&ep->ex_lock);
        if (ep->oxid != ntohs(rp->rrq_ox_id))
                goto unlock_reject;
        if (ep->rxid != ntohs(rp->rrq_rx_id) &&
            ep->rxid != FC_XID_UNKNOWN)
                goto unlock_reject;
        explan = ELS_EXPL_SID;
        if (ep->sid != sid)
                goto unlock_reject;

        /*
         * Clear Recovery Qualifier state, and cancel timer if complete.
         */
        if (ep->esb_stat & ESB_ST_REC_QUAL) {
                ep->esb_stat &= ~ESB_ST_REC_QUAL;
                atomic_dec(&ep->ex_refcnt);     /* drop hold for rec qual */
        }
        if (ep->esb_stat & ESB_ST_COMPLETE) {
                if (cancel_delayed_work(&ep->timeout_work))
                        atomic_dec(&ep->ex_refcnt);     /* drop timer hold */
        }

        spin_unlock_bh(&ep->ex_lock);

        /*
         * Send LS_ACC.
         */
        fc_seq_ls_acc(sp);
        fc_frame_free(fp);
        return;

unlock_reject:
        spin_unlock_bh(&ep->ex_lock);
        fc_exch_release(ep);    /* drop hold from fc_exch_find */
reject:
        fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
        fc_frame_free(fp);
}
struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
                                      enum fc_class class,
                                      u16 min_xid, u16 max_xid)
{
        struct fc_exch_mgr *mp;
        size_t len;

        if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
                FC_LPORT_DBG(lp, "Invalid min_xid 0x%x and max_xid 0x%x\n",
                             min_xid, max_xid);
                return NULL;
        }

        /*
         * Memory needed for the EM
         */
#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
        len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
        len += sizeof(struct fc_exch_mgr);

        mp = kzalloc(len, GFP_ATOMIC);
        if (!mp)
                return NULL;

        mp->class = class;
        mp->total_exches = 0;
        mp->exches = (struct fc_exch **)(mp + 1);
        mp->lp = lp;
        /* adjust em exch xid range for offload */
        mp->min_xid = min_xid;
        mp->max_xid = max_xid;
        mp->last_xid = min_xid - 1;
        mp->max_read = 0;
        mp->last_read = 0;
        if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
                mp->max_read = lp->lro_xid;
                mp->last_read = min_xid - 1;
                mp->last_xid = mp->max_read;
        } else {
                /* disable lro if no xid control over read */
                lp->lro_enabled = 0;
        }

        INIT_LIST_HEAD(&mp->ex_list);
        spin_lock_init(&mp->em_lock);

        mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
        if (!mp->ep_pool)
                goto free_mp;

        return mp;

free_mp:
        kfree(mp);
        return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);
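/*
 * Illustrative sizing example (values assumed, not from the original
 * source): for min_xid = 1 and max_xid = 4095, len covers 4095 slots of
 * sizeof(struct fc_exch *) plus sizeof(struct fc_exch_mgr), allocated in a
 * single kzalloc() with the pointer table placed directly after the manager
 * structure (mp + 1).
 */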
void fc_exch_mgr_free(struct fc_exch_mgr *mp)
{
        /*
         * The total exch count must be zero
         * before freeing the exchange manager.
         */
        WARN_ON(mp->total_exches != 0);
        mempool_destroy(mp->ep_pool);
        kfree(mp);
}
EXPORT_SYMBOL(fc_exch_mgr_free);
struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
{
        if (!lp || !lp->emp)
                return NULL;

        return fc_exch_alloc(lp->emp, fp, 0);
}
EXPORT_SYMBOL(fc_exch_get);
struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
                                struct fc_frame *fp,
                                void (*resp)(struct fc_seq *,
                                             struct fc_frame *fp,
                                             void *arg),
                                void (*destructor)(struct fc_seq *, void *),
                                void *arg, u32 timer_msec)
{
        struct fc_exch *ep;
        struct fc_seq *sp = NULL;
        struct fc_frame_header *fh;
        int rc = 1;

        ep = lp->tt.exch_get(lp, fp);
        if (!ep) {
                fc_frame_free(fp);
                return NULL;
        }
        ep->esb_stat |= ESB_ST_SEQ_INIT;
        fh = fc_frame_header_get(fp);
        fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
        ep->resp = resp;
        ep->destructor = destructor;
        ep->arg = arg;
        ep->r_a_tov = FC_DEF_R_A_TOV;
        ep->lp = lp;
        sp = &ep->seq;

        ep->fh_type = fh->fh_type;      /* save for possible timeout handling */
        ep->f_ctl = ntoh24(fh->fh_f_ctl);
        fc_exch_setup_hdr(ep, fp, ep->f_ctl);

        if (ep->xid <= lp->lro_xid)
                fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);

        if (unlikely(lp->tt.frame_send(lp, fp)))
                goto err;

        if (timer_msec)
                fc_exch_timer_set_locked(ep, timer_msec);
        ep->f_ctl &= ~FC_FC_FIRST_SEQ;  /* not first seq */

        if (ep->f_ctl & FC_FC_SEQ_INIT)
                ep->esb_stat &= ~ESB_ST_SEQ_INIT;
        spin_unlock_bh(&ep->ex_lock);
        return sp;
err:
        rc = fc_exch_done_locked(ep);
        spin_unlock_bh(&ep->ex_lock);
        if (!rc)
                fc_exch_mgr_delete_ep(ep);
        return NULL;
}
EXPORT_SYMBOL(fc_exch_seq_send);
/*
 * Receive a frame.
 */
void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
                  struct fc_frame *fp)
{
        struct fc_frame_header *fh = fc_frame_header_get(fp);
        u32 f_ctl;

        if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
                FC_LPORT_DBG(lp, "Receiving frames for an lport that "
                             "has not been initialized correctly\n");
                fc_frame_free(fp);
                return;
        }

        /*
         * If the frame is marked invalid, just drop it.
         */
        f_ctl = ntoh24(fh->fh_f_ctl);
        switch (fr_eof(fp)) {
        case FC_EOF_T:
                if (f_ctl & FC_FC_END_SEQ)
                        skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
                /* fall through */
        case FC_EOF_N:
                if (fh->fh_type == FC_TYPE_BLS)
                        fc_exch_recv_bls(mp, fp);
                else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
                         FC_FC_EX_CTX)
                        fc_exch_recv_seq_resp(mp, fp);
                else if (f_ctl & FC_FC_SEQ_CTX)
                        fc_exch_recv_resp(mp, fp);
                else
                        fc_exch_recv_req(lp, mp, fp);
                break;
        default:
                FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp));
                fc_frame_free(fp);
                break;
        }
}
EXPORT_SYMBOL(fc_exch_recv);
int fc_exch_init(struct fc_lport *lp)
{
        if (!lp->tt.exch_get) {
                /*
                 * exch_put() should be NULL if
                 * exch_get() is NULL
                 */
                WARN_ON(lp->tt.exch_put);
                lp->tt.exch_get = fc_exch_get;
        }

        if (!lp->tt.seq_start_next)
                lp->tt.seq_start_next = fc_seq_start_next;

        if (!lp->tt.exch_seq_send)
                lp->tt.exch_seq_send = fc_exch_seq_send;

        if (!lp->tt.seq_send)
                lp->tt.seq_send = fc_seq_send;

        if (!lp->tt.seq_els_rsp_send)
                lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;

        if (!lp->tt.exch_done)
                lp->tt.exch_done = fc_exch_done;

        if (!lp->tt.exch_mgr_reset)
                lp->tt.exch_mgr_reset = fc_exch_mgr_reset;

        if (!lp->tt.seq_exch_abort)
                lp->tt.seq_exch_abort = fc_seq_exch_abort;

        return 0;
}
EXPORT_SYMBOL(fc_exch_init);
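/*
 * Typical usage sketch (illustrative only, not from the original source):
 * an LLD fills in any libfc template entries it wants to override before
 * calling fc_exch_init(), and the function supplies defaults for the rest:
 *
 *      lport->tt.exch_get = my_exch_get;       // hypothetical LLD override
 *      fc_exch_init(lport);                    // fills remaining handlers
 */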
int fc_setup_exch_mgr(void)
{
        fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
                                         0, SLAB_HWCACHE_ALIGN, NULL);
        if (!fc_em_cachep)
                return -ENOMEM;
        return 0;
}
void fc_destroy_exch_mgr(void)
{
        kmem_cache_destroy(fc_em_cachep);
}