/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc.  All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * Fibre Channel exchange and sequence handling.
 */

#include <linux/timer.h>
#include <linux/gfp.h>
#include <linux/err.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
u16	fc_cpu_mask;		/* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16	fc_cpu_order;	/* power of 2 covering all possible cpus */
static struct kmem_cache *fc_em_cachep;	/* cache for exchanges */
/*
 * Structure and function definitions for managing Fibre Channel Exchanges
 * and Sequences.
 *
 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
 *
 * fc_exch_mgr holds the exchange state for an N port
 *
 * fc_exch holds state for one exchange and links to its active sequence.
 *
 * fc_seq holds the state for an individual sequence.
 */
/*
 * Per cpu exchange pool
 *
 * This structure manages per cpu exchanges in an array of exchange pointers.
 * The array is allocated immediately after the struct fc_exch_pool memory,
 * covering the range of exchanges assigned to the per cpu pool.
 */
struct fc_exch_pool {
	u16		next_index;	/* next possible free exchange index */
	u16		total_exches;	/* total allocated exchanges */
	spinlock_t	lock;		/* exch pool lock */
	struct list_head	ex_list;	/* allocated exchanges list */
};
/*
 * This structure is the center for creating exchanges and sequences.
 * It manages the allocation of exchange IDs.
 */
struct fc_exch_mgr {
	enum fc_class	class;		/* default class for sequences */
	struct kref	kref;		/* exchange mgr reference count */
	u16		min_xid;	/* min exchange ID */
	u16		max_xid;	/* max exchange ID */
	struct list_head	ex_list;	/* allocated exchanges list */
	mempool_t	*ep_pool;	/* reserve ep's */
	u16		pool_max_index;	/* max exch array index in exch pool */
	struct fc_exch_pool *pool;	/* per cpu exch pool */

	/*
	 * The exchange manager stats are currently updated but not used.
	 * Either expose the stats via sysfs or remove them altogether
	 * if they stay unused.  XXX
	 */
	struct {
		atomic_t no_free_exch;
		atomic_t no_free_exch_xid;
		atomic_t xid_not_found;
		atomic_t xid_busy;
		atomic_t seq_not_found;
		atomic_t non_bls_resp;
	} stats;
};
#define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
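
/*
 * Illustration (not in the original source): a struct fc_seq is embedded as
 * the "seq" member of its struct fc_exch, so the macro above recovers the
 * enclosing exchange from a sequence pointer, e.g.:
 *
 *	struct fc_exch *ep = fc_seq_exch(sp);
 */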
struct fc_exch_mgr_anchor {
	struct list_head ema_list;
	struct fc_exch_mgr *mp;
	bool (*match)(struct fc_frame *);
};
static void fc_exch_rrq(struct fc_exch *);
static void fc_seq_ls_acc(struct fc_seq *);
static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
			  enum fc_els_rjt_explan);
static void fc_exch_els_rec(struct fc_seq *, struct fc_frame *);
static void fc_exch_els_rrq(struct fc_seq *, struct fc_frame *);
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp);
/*
 * Internal implementation notes.
 *
 * There is one exchange manager by default in libfc, but an LLD may choose
 * to have one per CPU. The sequence manager is one per exchange manager
 * and currently never separated.
 *
 * Section 9.8 in FC-FS-2 specifies:  "The SEQ_ID is a one-byte field
 * assigned by the Sequence Initiator that shall be unique for a specific
 * D_ID and S_ID pair while the Sequence is open."  Note that it isn't
 * qualified by exchange ID, which one might think it would be.
 * In practice this limits the number of open sequences and exchanges to 256
 * per session.  For most targets we could treat this limit as per exchange.
 *
 * The exchange and its sequence are freed when the last sequence is received.
 * It's possible for the remote port to leave an exchange open without
 * sending any sequences.
 *
 * Notes on reference counts:
 *
 * Exchanges are reference counted and an exchange gets freed when the
 * reference count becomes zero.
 *
 * Timeouts:
 * Sequences are timed out for E_D_TOV and R_A_TOV.
 *
 * Sequence event handling:
 *
 * The following events may occur on initiator sequences:
 *
 *	Send.
 *	    For now, the whole thing is sent.
 *	Receive ACK
 *	    This applies only to class F.
 *	    The sequence is marked complete.
 *	ULP completion.
 *	    The upper layer calls fc_exch_done() when done
 *	    with exchange and sequence tuple.
 *	RX-inferred completion.
 *	    When we receive the next sequence on the same exchange, we can
 *	    retire the previous sequence ID.  (XXX not implemented).
 *	Timeout.
 *	    R_A_TOV frees the sequence ID.  If we're waiting for ACK,
 *	    E_D_TOV causes abort and calls upper layer response handler
 *	    with FC_EX_TIMEOUT error.
 *
 * The following events may occur on recipient sequences:
 *
 *	Receive.
 *	    Allocate sequence for first frame received.
 *	    Hold during receive handler.
 *	    Release when final frame received.
 *	    Keep status of last N of these for the ELS RES command.  XXX TBD.
 *	Receive ABTS
 *	    Deallocate sequence
 *
 * For now, we neglect conditions where only part of a sequence was
 * received or transmitted, or where out-of-order receipt is detected.
 *
 * Locking notes:
 *
 * The EM code runs in a per-CPU worker thread.
 *
 * To protect against concurrency between a worker thread code and timers,
 * sequence allocation and deallocation must be locked.
 *  - exchange refcnt can be done atomically without locks.
 *  - sequence allocation must be locked by exch lock.
 *  - If the EM pool lock and ex_lock must be taken at the same time, then the
 *    EM pool lock must be taken before the ex_lock.
 */
/*
 * opcode names for debugging.
 */
static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;

#define FC_TABLE_SIZE(x)   (sizeof(x) / sizeof(x[0]))

static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
					      unsigned int max_index)
{
	const char *name = NULL;

	if (op < max_index)
		name = table[op];
	if (!name)
		name = "unknown";
	return name;
}

static const char *fc_exch_rctl_name(unsigned int op)
{
	return fc_exch_name_lookup(op, fc_exch_rctl_names,
				   FC_TABLE_SIZE(fc_exch_rctl_names));
}
/*
 * Hold an exchange - keep it from being freed.
 */
static void fc_exch_hold(struct fc_exch *ep)
{
	atomic_inc(&ep->ex_refcnt);
}
/*
 * Set up the FC header by initializing a few more FC header fields and sof/eof.
 * Fields initialized by this function:
 *   - fh_ox_id, fh_rx_id, fh_seq_id, fh_seq_cnt
 *   - sof and eof
 */
static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
			      u32 f_ctl)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u16 fill;

	fr_sof(fp) = ep->class;
	if (ep->seq.cnt)
		fr_sof(fp) = fc_sof_normal(ep->class);

	if (f_ctl & FC_FC_END_SEQ) {
		fr_eof(fp) = FC_EOF_T;
		if (fc_sof_needs_ack(ep->class))
			fr_eof(fp) = FC_EOF_N;
		/*
		 * The number of fill bytes to make the length a 4-byte
		 * multiple is the low order 2-bits of the f_ctl.
		 * The fill itself will have been cleared by the frame
		 * allocation.
		 * After this, the length will be even, as expected by
		 * the transport.
		 */
		fill = fr_len(fp) & 3;
		if (fill) {
			fill = 4 - fill;
			/* TODO, this may be a problem with fragmented skb */
			skb_put(fp_skb(fp), fill);
			hton24(fh->fh_f_ctl, f_ctl | fill);
		}
	} else {
		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad to non last frame */
		fr_eof(fp) = FC_EOF_N;
	}

	/*
	 * Initialize remaining fh fields from fc_fill_fc_hdr.
	 */
	fh->fh_ox_id = htons(ep->oxid);
	fh->fh_rx_id = htons(ep->rxid);
	fh->fh_seq_id = ep->seq.id;
	fh->fh_seq_cnt = htons(ep->seq.cnt);
}
/*
 * Release a reference to an exchange.
 * If the refcnt goes to zero and the exchange is complete, it is freed.
 */
static void fc_exch_release(struct fc_exch *ep)
{
	struct fc_exch_mgr *mp;

	if (atomic_dec_and_test(&ep->ex_refcnt)) {
		mp = ep->em;
		if (ep->destructor)
			ep->destructor(&ep->seq, ep->arg);
		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
		mempool_free(ep, mp->ep_pool);
	}
}
static int fc_exch_done_locked(struct fc_exch *ep)
{
	int rc = 1;

	/*
	 * We must check for completion in case there are two threads
	 * trying to complete this. But the rrq code will reuse the
	 * ep, and in that case we only clear the resp and set it as
	 * complete, so it can be reused by the timer to send the rrq.
	 */
	ep->resp = NULL;
	if (ep->state & FC_EX_DONE)
		return rc;
	ep->esb_stat |= ESB_ST_COMPLETE;

	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->state |= FC_EX_DONE;
		if (cancel_delayed_work(&ep->timeout_work))
			atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
		rc = 0;
	}
	return rc;
}
static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
					      u16 index)
{
	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
	return exches[index];
}

static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
				   struct fc_exch *ep)
{
	((struct fc_exch **)(pool + 1))[index] = ep;
}
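
/*
 * Layout sketch (not from the original source; derived from the accessors
 * above and the pool_size computation in fc_exch_mgr_alloc()): the exchange
 * pointer array sits immediately after the per-CPU struct fc_exch_pool,
 * which is why both helpers index off (pool + 1):
 *
 *	per_cpu_ptr(mp->pool, cpu)  --> +--------------------------+
 *	                                | struct fc_exch_pool      |
 *	(struct fc_exch **)(pool + 1)-> +--------------------------+
 *	                                | struct fc_exch *[range]  |
 *	                                +--------------------------+
 *
 * where "range" is the per-pool exchange range, sized in fc_exch_mgr_alloc()
 * as sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *).
 */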
static void fc_exch_delete(struct fc_exch *ep)
{
	struct fc_exch_pool *pool;

	pool = ep->pool;
	spin_lock_bh(&pool->lock);
	WARN_ON(pool->total_exches <= 0);
	pool->total_exches--;
	fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order,
			NULL);
	list_del(&ep->ex_list);
	spin_unlock_bh(&pool->lock);
	fc_exch_release(ep);	/* drop hold for exch in mp */
}
/*
 * Internal version of fc_exch_timer_set - used with lock held.
 */
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
					    unsigned int timer_msec)
{
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		return;

	FC_EXCH_DBG(ep, "Exchange timer armed\n");

	if (schedule_delayed_work(&ep->timeout_work,
				  msecs_to_jiffies(timer_msec)))
		fc_exch_hold(ep);		/* hold for timer */
}
/*
 * Set timer for an exchange.
 * The time is a minimum delay in milliseconds until the timer fires.
 * Used for upper level protocols to time out the exchange.
 * The timer is cancelled when it fires or when the exchange completes.
 */
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
{
	spin_lock_bh(&ep->ex_lock);
	fc_exch_timer_set_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
}
int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
	struct fc_seq *sp;
	struct fc_exch *ep;
	struct fc_frame *fp;
	int error;

	ep = fc_seq_exch(req_sp);

	spin_lock_bh(&ep->ex_lock);
	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
		spin_unlock_bh(&ep->ex_lock);
		return -ENXIO;
	}

	/*
	 * Send the abort on a new sequence if possible.
	 */
	sp = fc_seq_start_next_locked(&ep->seq);
	if (!sp) {
		spin_unlock_bh(&ep->ex_lock);
		return -ENOMEM;
	}

	ep->esb_stat |= ESB_ST_SEQ_INIT | ESB_ST_ABNORMAL;
	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);

	/*
	 * If not logged into the fabric, don't send ABTS but leave
	 * sequence active until next timeout.
	 */
	if (!ep->sid)
		return 0;

	/*
	 * Send an abort for the sequence that timed out.
	 */
	fp = fc_frame_alloc(ep->lp, 0);
	if (fp) {
		fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
			       FC_TYPE_BLS, FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
		error = fc_seq_send(ep->lp, sp, fp);
	} else
		error = -ENOBUFS;
	return error;
}
EXPORT_SYMBOL(fc_seq_exch_abort);
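
/*
 * Caller sketch (illustrative, not from the original source): upper layers
 * normally reach this through the transport template, e.g. when a command
 * times out, using the same 2 * R_A_TOV window the timeout handler below
 * uses:
 *
 *	if (lport->tt.seq_exch_abort(sp, 2 * ep->r_a_tov))
 *		;	(abort could not be started; caller must escalate)
 */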
/*
 * Exchange timeout - handle exchange timer expiration.
 * The timer will have been cancelled before this is called.
 */
static void fc_exch_timeout(struct work_struct *work)
{
	struct fc_exch *ep = container_of(work, struct fc_exch,
					  timeout_work.work);
	struct fc_seq *sp = &ep->seq;
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *arg;
	u32 e_stat;
	int rc = 1;

	FC_EXCH_DBG(ep, "Exchange timed out\n");

	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		goto unlock;

	e_stat = ep->esb_stat;
	if (e_stat & ESB_ST_COMPLETE) {
		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
		spin_unlock_bh(&ep->ex_lock);
		if (e_stat & ESB_ST_REC_QUAL)
			fc_exch_rrq(ep);
		goto done;
	} else {
		resp = ep->resp;
		ep->resp = NULL;
		arg = ep->arg;
		if (e_stat & ESB_ST_ABNORMAL)
			rc = fc_exch_done_locked(ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
		if (resp)
			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
		goto done;
	}
unlock:
	spin_unlock_bh(&ep->ex_lock);
done:
	/*
	 * This release matches the hold taken when the timer was set.
	 */
	fc_exch_release(ep);
}
/*
 * Allocate a sequence.
 *
 * We don't support multiple originated sequences on the same exchange.
 * By implication, any previously originated sequence on this exchange
 * is complete, and we reallocate the same sequence.
 */
static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
{
	struct fc_seq *sp;

	sp = &ep->seq;
	sp->ssb_stat = 0;
	sp->cnt = 0;
	sp->id = seq_id;
	return sp;
}
/**
 * fc_exch_em_alloc() - allocate an exchange from a specified EM.
 * @lport: ptr to the local port
 * @mp: ptr to the exchange manager
 *
 * Returns pointer to allocated fc_exch with exch lock held.
 */
static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
					struct fc_exch_mgr *mp)
{
	struct fc_exch *ep;
	unsigned int cpu;
	u16 index;
	struct fc_exch_pool *pool;

	/* allocate memory for exchange */
	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
	if (!ep) {
		atomic_inc(&mp->stats.no_free_exch);
		goto out;
	}
	memset(ep, 0, sizeof(*ep));

	cpu = smp_processor_id();
	pool = per_cpu_ptr(mp->pool, cpu);
	spin_lock_bh(&pool->lock);
	index = pool->next_index;
	/* allocate new exch from pool */
	while (fc_exch_ptr_get(pool, index)) {
		index = index == mp->pool_max_index ? 0 : index + 1;
		if (index == pool->next_index)
			goto err;
	}
	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;

	fc_exch_hold(ep);	/* hold for exch in mp */
	spin_lock_init(&ep->ex_lock);
	/*
	 * Hold exch lock for caller to prevent fc_exch_reset()
	 * from releasing exch while fc_exch_alloc() caller is
	 * still working on exch.
	 */
	spin_lock_bh(&ep->ex_lock);

	fc_exch_ptr_set(pool, index, ep);
	list_add_tail(&ep->ex_list, &pool->ex_list);
	fc_seq_alloc(ep, ep->seq_id++);
	pool->total_exches++;
	spin_unlock_bh(&pool->lock);

	/*
	 * update exchange
	 */
	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
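	/*
	 * Worked example of the xid encoding above (illustrative values):
	 * with fc_cpu_order = 2, fc_cpu_mask = 3 and mp->min_xid = 0x10,
	 * pool index 5 on cpu 1 yields xid = (5 << 2 | 1) + 0x10 = 0x25.
	 * The receive path inverts this in fc_exch_find():
	 * cpu = xid & fc_cpu_mask, index = (xid - min_xid) >> fc_cpu_order.
	 */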
	ep->em = mp;
	ep->pool = pool;
	ep->lp = lport;
	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
	ep->rxid = FC_XID_UNKNOWN;
	ep->class = mp->class;
	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
	return ep;
err:
	spin_unlock_bh(&pool->lock);
	atomic_inc(&mp->stats.no_free_exch_xid);
	mempool_free(ep, mp->ep_pool);
	return NULL;
}
/**
 * fc_exch_alloc() - allocate an exchange.
 * @lport: ptr to the local port
 * @fp: ptr to the FC frame
 *
 * This function walks the list of exchange manager (EM) anchors
 * to select an EM for the new exchange allocation. An EM is
 * selected when either its match function pointer is NULL or
 * its match function returns true.
 */
struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_exch_mgr_anchor *ema;
	struct fc_exch *ep;

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		if (!ema->match || ema->match(fp)) {
			ep = fc_exch_em_alloc(lport, ema->mp);
			if (ep)
				return ep;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(fc_exch_alloc);
/*
 * Lookup and hold an exchange.
 */
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
	struct fc_exch_pool *pool;
	struct fc_exch *ep = NULL;

	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
		pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
		spin_lock_bh(&pool->lock);
		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
		if (ep) {
			fc_exch_hold(ep);
			WARN_ON(ep->xid != xid);
		}
		spin_unlock_bh(&pool->lock);
	}
	return ep;
}
void fc_exch_done(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	int rc;

	spin_lock_bh(&ep->ex_lock);
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);
}
EXPORT_SYMBOL(fc_exch_done);
/*
 * Allocate a new exchange as responder.
 * Sets the responder ID in the frame header.
 */
static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
				    struct fc_exch_mgr *mp,
				    struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh;

	ep = fc_exch_alloc(lport, fp);
	if (ep) {
		ep->class = fc_frame_class(fp);

		/*
		 * Set EX_CTX indicating we're responding on this exchange.
		 */
		ep->f_ctl |= FC_FC_EX_CTX;	/* we're responding */
		ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not new */
		fh = fc_frame_header_get(fp);
		ep->sid = ntoh24(fh->fh_d_id);
		ep->did = ntoh24(fh->fh_s_id);
		ep->oid = ep->did;

		/*
		 * Allocated exchange has placed the XID in the
		 * originator field. Move it to the responder field,
		 * and set the originator XID from the frame.
		 */
		ep->rxid = ep->xid;
		ep->oxid = ntohs(fh->fh_ox_id);
		ep->esb_stat |= ESB_ST_RESP | ESB_ST_SEQ_INIT;
		if ((ntoh24(fh->fh_f_ctl) & FC_FC_SEQ_INIT) == 0)
			ep->esb_stat &= ~ESB_ST_SEQ_INIT;

		fc_exch_hold(ep);	/* hold for caller */
		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
	}
	return ep;
}
/*
 * Find a sequence for receive where the other end is originating the sequence.
 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
 * on the ep that should be released by the caller.
 */
static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
						 struct fc_exch_mgr *mp,
						 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep = NULL;
	struct fc_seq *sp = NULL;
	enum fc_pf_rjt_reason reject = FC_RJT_NONE;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != 0);

	/*
	 * Lookup or create the exchange if we will be creating the sequence.
	 */
	if (f_ctl & FC_FC_EX_CTX) {
		xid = ntohs(fh->fh_ox_id);	/* we originated exch */
		ep = fc_exch_find(mp, xid);
		if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_OX_ID;
			goto out;
		}
		if (ep->rxid == FC_XID_UNKNOWN)
			ep->rxid = ntohs(fh->fh_rx_id);
		else if (ep->rxid != ntohs(fh->fh_rx_id)) {
			reject = FC_RJT_OX_ID;
			goto rel;
		}
	} else {
		xid = ntohs(fh->fh_rx_id);	/* we are the responder */

		/*
		 * Special case for MDS issuing an ELS TEST with a
		 * bad rx_id of 0.
		 * XXX take this out once we do the proper reject.
		 */
		if (xid == 0 && fh->fh_r_ctl == FC_RCTL_ELS_REQ &&
		    fc_frame_payload_op(fp) == ELS_TEST) {
			fh->fh_rx_id = htons(FC_XID_UNKNOWN);
			xid = FC_XID_UNKNOWN;
		}

		/*
		 * new sequence - find the exchange
		 */
		ep = fc_exch_find(mp, xid);
		if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
			if (ep) {
				atomic_inc(&mp->stats.xid_busy);
				reject = FC_RJT_RX_ID;
				goto rel;
			}
			ep = fc_exch_resp(lport, mp, fp);
			if (!ep) {
				reject = FC_RJT_EXCH_EST;	/* XXX */
				goto out;
			}
			xid = ep->xid;	/* get our XID */
		} else if (!ep) {
			atomic_inc(&mp->stats.xid_not_found);
			reject = FC_RJT_RX_ID;	/* XID not found */
			goto out;
		}
	}

	/*
	 * At this point, we have the exchange held.
	 * Find or create the sequence.
	 */
	if (fc_sof_is_init(fr_sof(fp))) {
		sp = fc_seq_start_next(&ep->seq);
		if (!sp) {
			reject = FC_RJT_SEQ_XS;	/* exchange shortage */
			goto rel;
		}
		sp->id = fh->fh_seq_id;
		sp->ssb_stat |= SSB_ST_RESP;
	} else {
		sp = &ep->seq;
		if (sp->id != fh->fh_seq_id) {
			atomic_inc(&mp->stats.seq_not_found);
			reject = FC_RJT_SEQ_ID;	/* sequence/exch should exist */
			goto rel;
		}
	}
	WARN_ON(ep != fc_seq_exch(sp));

	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;

	fr_seq(fp) = sp;
out:
	return reject;
rel:
	fc_exch_done(&ep->seq);
	fc_exch_release(ep);	/* hold from fc_exch_find/fc_exch_resp */
	return reject;
}
/*
 * Find the sequence for a frame being received.
 * We originated the sequence, so it should be found.
 * We may or may not have originated the exchange.
 * Does not hold the sequence for the caller.
 */
static struct fc_seq *fc_seq_lookup_orig(struct fc_exch_mgr *mp,
					 struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	u32 f_ctl;
	u16 xid;

	f_ctl = ntoh24(fh->fh_f_ctl);
	WARN_ON((f_ctl & FC_FC_SEQ_CTX) != FC_FC_SEQ_CTX);
	xid = ntohs((f_ctl & FC_FC_EX_CTX) ? fh->fh_ox_id : fh->fh_rx_id);
	ep = fc_exch_find(mp, xid);
	if (!ep)
		return NULL;
	if (ep->seq.id == fh->fh_seq_id) {
		/*
		 * Save the RX_ID if we didn't previously know it.
		 */
		sp = &ep->seq;
		if ((f_ctl & FC_FC_EX_CTX) != 0 &&
		    ep->rxid == FC_XID_UNKNOWN) {
			ep->rxid = ntohs(fh->fh_rx_id);
		}
	}
	fc_exch_release(ep);
	return sp;
}
/*
 * Set addresses for an exchange.
 * Note this must be done before the first sequence of the exchange is sent.
 */
static void fc_exch_set_addr(struct fc_exch *ep,
			     u32 orig_id, u32 resp_id)
{
	ep->oid = orig_id;
	if (ep->esb_stat & ESB_ST_RESP) {
		ep->sid = resp_id;
		ep->did = orig_id;
	} else {
		ep->sid = orig_id;
		ep->did = resp_id;
	}
}
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	sp = fc_seq_alloc(ep, ep->seq_id++);
	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
		    ep->f_ctl, sp->id);
	return sp;
}
/*
 * Allocate a new sequence on the same exchange as the supplied sequence.
 * This will never return NULL.
 */
struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	spin_lock_bh(&ep->ex_lock);
	sp = fc_seq_start_next_locked(sp);
	spin_unlock_bh(&ep->ex_lock);

	return sp;
}
EXPORT_SYMBOL(fc_seq_start_next);
int fc_seq_send(struct fc_lport *lp, struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	int error;
	u32 f_ctl;

	ep = fc_seq_exch(sp);
	WARN_ON((ep->esb_stat & ESB_ST_SEQ_INIT) != ESB_ST_SEQ_INIT);

	f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, f_ctl);

	/*
	 * update sequence count if this frame is carrying
	 * multiple FC frames when sequence offload is enabled
	 * by LLD.
	 */
	if (fr_max_payload(fp))
		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
					fr_max_payload(fp));
	else
		sp->cnt++;

	/*
	 * Send the frame.
	 */
	error = lp->tt.frame_send(lp, fp);

	/*
	 * Update the exchange and sequence flags,
	 * assuming all frames for the sequence have been sent.
	 * We can only be called to send once for each sequence.
	 */
	spin_lock_bh(&ep->ex_lock);
	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
	if (f_ctl & (FC_FC_END_SEQ | FC_FC_SEQ_INIT))
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);
	return error;
}
EXPORT_SYMBOL(fc_seq_send);
void fc_seq_els_rsp_send(struct fc_seq *sp, enum fc_els_cmd els_cmd,
			 struct fc_seq_els_data *els_data)
{
	switch (els_cmd) {
	case ELS_LS_RJT:
		fc_seq_ls_rjt(sp, els_data->reason, els_data->explan);
		break;
	case ELS_LS_ACC:
		fc_seq_ls_acc(sp);
		break;
	case ELS_RRQ:
		fc_exch_els_rrq(sp, els_data->fp);
		break;
	case ELS_REC:
		fc_exch_els_rec(sp, els_data->fp);
		break;
	default:
		FC_EXCH_DBG(fc_seq_exch(sp), "Invalid ELS CMD:%x\n", els_cmd);
	}
}
EXPORT_SYMBOL(fc_seq_els_rsp_send);
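
/*
 * Caller sketch (illustrative, not part of the original source): rejecting
 * an unsupported ELS request through the transport template, filling the
 * fc_seq_els_data fields referenced above:
 *
 *	struct fc_seq_els_data rjt_data;
 *
 *	rjt_data.fp = NULL;
 *	rjt_data.reason = ELS_RJT_UNSUP;
 *	rjt_data.explan = ELS_EXPL_NONE;
 *	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
 */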
/*
 * Send a sequence, which is also the last sequence in the exchange.
 */
static void fc_seq_send_last(struct fc_seq *sp, struct fc_frame *fp,
			     enum fc_rctl rctl, enum fc_fh_type fh_type)
{
	u32 f_ctl;
	struct fc_exch *ep = fc_seq_exch(sp);

	f_ctl = FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	f_ctl |= ep->f_ctl;
	fc_fill_fc_hdr(fp, rctl, ep->did, ep->sid, fh_type, f_ctl, 0);
	fc_seq_send(ep->lp, sp, fp);
}
/*
 * Send ACK_1 (or equiv.) indicating we received something.
 * The frame we're acking is supplied.
 */
static void fc_seq_send_ack(struct fc_seq *sp, const struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_lport *lp = ep->lp;
	unsigned int f_ctl;

	/*
	 * Don't send ACKs for class 3.
	 */
	if (fc_sof_needs_ack(fr_sof(rx_fp))) {
		fp = fc_frame_alloc(lp, 0);
		if (!fp)
			return;

		fh = fc_frame_header_get(fp);
		fh->fh_r_ctl = FC_RCTL_ACK_1;
		fh->fh_type = FC_TYPE_BLS;

		/*
		 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
		 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
		 * Bits 9-8 are meaningful (retransmitted or unidirectional).
		 * Last ACK uses bits 7-6 (continue sequence),
		 * bits 5-4 are meaningful (what kind of ACK to use).
		 */
		rx_fh = fc_frame_header_get(rx_fp);
		f_ctl = ntoh24(rx_fh->fh_f_ctl);
		f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
			FC_FC_FIRST_SEQ | FC_FC_LAST_SEQ |
			FC_FC_END_SEQ | FC_FC_END_CONN | FC_FC_SEQ_INIT |
			FC_FC_RETX_SEQ | FC_FC_UNI_TX;
		f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
		hton24(fh->fh_f_ctl, f_ctl);

		fc_exch_setup_hdr(ep, fp, f_ctl);
		fh->fh_seq_id = rx_fh->fh_seq_id;
		fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
		fh->fh_parm_offset = htonl(1);	/* ack single frame */

		fr_sof(fp) = fr_sof(rx_fp);
		if (f_ctl & FC_FC_END_SEQ)
			fr_eof(fp) = FC_EOF_T;
		else
			fr_eof(fp) = FC_EOF_N;

		(void) lp->tt.frame_send(lp, fp);
	}
}
/*
 * Send BLS Reject.
 * This is for rejecting BA_ABTS only.
 */
static void fc_exch_send_ba_rjt(struct fc_frame *rx_fp,
				enum fc_ba_rjt_reason reason,
				enum fc_ba_rjt_explan explan)
{
	struct fc_frame *fp;
	struct fc_frame_header *rx_fh;
	struct fc_frame_header *fh;
	struct fc_ba_rjt *rp;
	struct fc_lport *lp;
	unsigned int f_ctl;

	lp = fr_dev(rx_fp);
	fp = fc_frame_alloc(lp, sizeof(*rp));
	if (!fp)
		return;
	fh = fc_frame_header_get(fp);
	rx_fh = fc_frame_header_get(rx_fp);

	memset(fh, 0, sizeof(*fh) + sizeof(*rp));

	rp = fc_frame_payload_get(fp, sizeof(*rp));
	rp->br_reason = reason;
	rp->br_explan = explan;

	/*
	 * seq_id, cs_ctl, df_ctl and param/offset are zero.
	 */
	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
	fh->fh_ox_id = rx_fh->fh_ox_id;
	fh->fh_rx_id = rx_fh->fh_rx_id;
	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
	fh->fh_r_ctl = FC_RCTL_BA_RJT;
	fh->fh_type = FC_TYPE_BLS;

	/*
	 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
	 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
	 * Bits 9-8 are meaningful (retransmitted or unidirectional).
	 * Last ACK uses bits 7-6 (continue sequence),
	 * bits 5-4 are meaningful (what kind of ACK to use).
	 * Always set LAST_SEQ, END_SEQ.
	 */
	f_ctl = ntoh24(rx_fh->fh_f_ctl);
	f_ctl &= FC_FC_EX_CTX | FC_FC_SEQ_CTX |
		FC_FC_END_CONN | FC_FC_SEQ_INIT |
		FC_FC_RETX_SEQ | FC_FC_UNI_TX;
	f_ctl ^= FC_FC_EX_CTX | FC_FC_SEQ_CTX;
	f_ctl |= FC_FC_LAST_SEQ | FC_FC_END_SEQ;
	f_ctl &= ~FC_FC_FIRST_SEQ;
	hton24(fh->fh_f_ctl, f_ctl);

	fr_sof(fp) = fc_sof_class(fr_sof(rx_fp));
	fr_eof(fp) = FC_EOF_T;
	if (fc_sof_needs_ack(fr_sof(fp)))
		fr_eof(fp) = FC_EOF_N;

	(void) lp->tt.frame_send(lp, fp);
}
/*
 * Handle an incoming ABTS.  This would be for target mode usually,
 * but could be due to lost FCP transfer ready, confirm or RRQ.
 * We always handle this as an exchange abort, ignoring the parameter.
 */
static void fc_exch_recv_abts(struct fc_exch *ep, struct fc_frame *rx_fp)
{
	struct fc_frame *fp;
	struct fc_ba_acc *ap;
	struct fc_frame_header *fh;
	struct fc_seq *sp;

	if (!ep)
		goto reject;
	spin_lock_bh(&ep->ex_lock);
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		spin_unlock_bh(&ep->ex_lock);
		goto reject;
	}
	if (!(ep->esb_stat & ESB_ST_REC_QUAL))
		fc_exch_hold(ep);		/* hold for REC_QUAL */
	ep->esb_stat |= ESB_ST_ABNORMAL | ESB_ST_REC_QUAL;
	fc_exch_timer_set_locked(ep, ep->r_a_tov);

	fp = fc_frame_alloc(ep->lp, sizeof(*ap));
	if (!fp) {
		spin_unlock_bh(&ep->ex_lock);
		goto free;
	}
	fh = fc_frame_header_get(fp);
	ap = fc_frame_payload_get(fp, sizeof(*ap));
	memset(ap, 0, sizeof(*ap));
	sp = &ep->seq;
	ap->ba_high_seq_cnt = htons(0xffff);
	if (sp->ssb_stat & SSB_ST_RESP) {
		ap->ba_seq_id = sp->id;
		ap->ba_seq_id_val = FC_BA_SEQ_ID_VAL;
		ap->ba_high_seq_cnt = fh->fh_seq_cnt;
		ap->ba_low_seq_cnt = htons(sp->cnt);
	}
	sp = fc_seq_start_next_locked(sp);
	spin_unlock_bh(&ep->ex_lock);
	fc_seq_send_last(sp, fp, FC_RCTL_BA_ACC, FC_TYPE_BLS);
	fc_frame_free(rx_fp);
	return;

reject:
	fc_exch_send_ba_rjt(rx_fp, FC_BA_RJT_UNABLE, FC_BA_RJT_INV_XID);
free:
	fc_frame_free(rx_fp);
}
/*
 * Handle receive where the other end is originating the sequence.
 */
static void fc_exch_recv_req(struct fc_lport *lp, struct fc_exch_mgr *mp,
			     struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp = NULL;
	struct fc_exch *ep = NULL;
	u32 f_ctl;
	enum fc_pf_rjt_reason reject;

	reject = fc_seq_lookup_recip(lp, mp, fp);
	if (reject == FC_RJT_NONE) {
		sp = fr_seq(fp);	/* sequence will be held */
		ep = fc_seq_exch(sp);
		f_ctl = ntoh24(fh->fh_f_ctl);
		fc_seq_send_ack(sp, fp);

		/*
		 * Call the receive function.
		 *
		 * The receive function may allocate a new sequence
		 * over the old one, so we shouldn't change the
		 * sequence after this.
		 *
		 * The frame will be freed by the receive function.
		 * If new exch resp handler is valid then call that
		 * first.
		 */
		if (ep->resp)
			ep->resp(sp, fp, ep->arg);
		else
			lp->tt.lport_recv(lp, sp, fp);
		fc_exch_release(ep);	/* release from lookup */
	} else {
		FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject);
		fc_frame_free(fp);
	}
}
/*
 * Handle receive where the other end is originating the sequence in
 * response to our exchange.
 */
static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_seq *sp;
	struct fc_exch *ep;
	enum fc_sof sof;
	u32 f_ctl;
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *ex_resp_arg;
	int rc;

	ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
	if (!ep) {
		atomic_inc(&mp->stats.xid_not_found);
		goto out;
	}
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		atomic_inc(&mp->stats.xid_not_found);
		goto out;
	}
	if (ep->rxid == FC_XID_UNKNOWN)
		ep->rxid = ntohs(fh->fh_rx_id);
	if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	if (ep->did != ntoh24(fh->fh_s_id) &&
	    ep->did != FC_FID_FLOGI) {
		atomic_inc(&mp->stats.xid_not_found);
		goto rel;
	}
	sof = fr_sof(fp);
	if (fc_sof_is_init(sof)) {
		sp = fc_seq_start_next(&ep->seq);
		sp->id = fh->fh_seq_id;
		sp->ssb_stat |= SSB_ST_RESP;
	} else {
		sp = &ep->seq;
		if (sp->id != fh->fh_seq_id) {
			atomic_inc(&mp->stats.seq_not_found);
			goto rel;
		}
	}
	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = sp;
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat |= ESB_ST_SEQ_INIT;

	if (fc_sof_needs_ack(sof))
		fc_seq_send_ack(sp, fp);
	resp = ep->resp;
	ex_resp_arg = ep->arg;

	if (fh->fh_type != FC_TYPE_FCP && fr_eof(fp) == FC_EOF_T &&
	    (f_ctl & (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) ==
	    (FC_FC_LAST_SEQ | FC_FC_END_SEQ)) {
		spin_lock_bh(&ep->ex_lock);
		rc = fc_exch_done_locked(ep);
		WARN_ON(fc_seq_exch(sp) != ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
	}

	/*
	 * Call the receive function.
	 * The sequence is held (has a refcnt) for us,
	 * but not for the receive function.
	 *
	 * The receive function may allocate a new sequence
	 * over the old one, so we shouldn't change the
	 * sequence after this.
	 *
	 * The frame will be freed by the receive function.
	 * If new exch resp handler is valid then call that
	 * first.
	 */
	if (resp)
		resp(sp, fp, ex_resp_arg);
	else
		fc_frame_free(fp);
	fc_exch_release(ep);
	return;
rel:
	fc_exch_release(ep);
out:
	fc_frame_free(fp);
}
/*
 * Handle receive for a sequence where other end is responding to our sequence.
 */
static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_seq *sp;

	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */
	if (!sp)
		atomic_inc(&mp->stats.xid_not_found);
	else
		atomic_inc(&mp->stats.non_bls_resp);
	fc_frame_free(fp);
}
/*
 * Handle the response to an ABTS for exchange or sequence.
 * This can be BA_ACC or BA_RJT.
 */
static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
{
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *ex_resp_arg;
	struct fc_frame_header *fh;
	struct fc_ba_acc *ap;
	struct fc_seq *sp;
	u16 low;
	u16 high;
	int rc = 1, has_rec = 0;

	fh = fc_frame_header_get(fp);
	FC_EXCH_DBG(ep, "exch: BLS rctl %x - %s\n", fh->fh_r_ctl,
		    fc_exch_rctl_name(fh->fh_r_ctl));

	if (cancel_delayed_work_sync(&ep->timeout_work))
		fc_exch_release(ep);	/* release from pending timer hold */

	spin_lock_bh(&ep->ex_lock);
	switch (fh->fh_r_ctl) {
	case FC_RCTL_BA_ACC:
		ap = fc_frame_payload_get(fp, sizeof(*ap));
		if (!ap)
			break;

		/*
		 * Decide whether to establish a Recovery Qualifier.
		 * We do this if there is a non-empty SEQ_CNT range and
		 * SEQ_ID is the same as the one we aborted.
		 */
		low = ntohs(ap->ba_low_seq_cnt);
		high = ntohs(ap->ba_high_seq_cnt);
		if ((ep->esb_stat & ESB_ST_REC_QUAL) == 0 &&
		    (ap->ba_seq_id_val != FC_BA_SEQ_ID_VAL ||
		     ap->ba_seq_id == ep->seq_id) && low != high) {
			ep->esb_stat |= ESB_ST_REC_QUAL;
			fc_exch_hold(ep);  /* hold for recovery qualifier */
			has_rec = 1;
		}
		break;
	case FC_RCTL_BA_RJT:
		break;
	default:
		break;
	}

	resp = ep->resp;
	ep->resp = NULL;
	ex_resp_arg = ep->arg;
	sp = &ep->seq;

	/* do we need to do some other checks here. Can we reuse more of
	 * fc_exch_recv_seq_resp
	 */

	/*
	 * do we want to check END_SEQ as well as LAST_SEQ here?
	 */
	if (ep->fh_type != FC_TYPE_FCP &&
	    ntoh24(fh->fh_f_ctl) & FC_FC_LAST_SEQ)
		rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);

	if (resp)
		resp(sp, fp, ex_resp_arg);
	else
		fc_frame_free(fp);

	if (has_rec)
		fc_exch_timer_set(ep, ep->r_a_tov);
}
/*
 * Receive BLS sequence.
 * This is always a sequence initiated by the remote side.
 * We may be either the originator or recipient of the exchange.
 */
static void fc_exch_recv_bls(struct fc_exch_mgr *mp, struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_exch *ep;
	u32 f_ctl;

	fh = fc_frame_header_get(fp);
	f_ctl = ntoh24(fh->fh_f_ctl);
	fr_seq(fp) = NULL;

	ep = fc_exch_find(mp, (f_ctl & FC_FC_EX_CTX) ?
			  ntohs(fh->fh_ox_id) : ntohs(fh->fh_rx_id));
	if (ep && (f_ctl & FC_FC_SEQ_INIT)) {
		spin_lock_bh(&ep->ex_lock);
		ep->esb_stat |= ESB_ST_SEQ_INIT;
		spin_unlock_bh(&ep->ex_lock);
	}
	if (f_ctl & FC_FC_SEQ_CTX) {
		/*
		 * A response to a sequence we initiated.
		 * This should only be ACKs for class 2 or F.
		 */
		switch (fh->fh_r_ctl) {
		case FC_RCTL_ACK_1:
		case FC_RCTL_ACK_0:
			break;
		default:
			FC_EXCH_DBG(ep, "BLS rctl %x - %s received",
				    fh->fh_r_ctl,
				    fc_exch_rctl_name(fh->fh_r_ctl));
			break;
		}
		fc_frame_free(fp);
	} else {
		switch (fh->fh_r_ctl) {
		case FC_RCTL_BA_RJT:
		case FC_RCTL_BA_ACC:
			if (ep)
				fc_exch_abts_resp(ep, fp);
			else
				fc_frame_free(fp);
			break;
		case FC_RCTL_BA_ABTS:
			fc_exch_recv_abts(ep, fp);
			break;
		default:			/* ignore junk */
			fc_frame_free(fp);
			break;
		}
	}
	if (ep)
		fc_exch_release(ep);	/* release hold taken by fc_exch_find */
}
/*
 * Accept sequence with LS_ACC.
 * If this fails due to allocation or transmit congestion, assume the
 * originator will repeat the sequence.
 */
static void fc_seq_ls_acc(struct fc_seq *req_sp)
{
	struct fc_seq *sp;
	struct fc_els_ls_acc *acc;
	struct fc_frame *fp;

	sp = fc_seq_start_next(req_sp);
	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
	if (fp) {
		acc = fc_frame_payload_get(fp, sizeof(*acc));
		memset(acc, 0, sizeof(*acc));
		acc->la_cmd = ELS_LS_ACC;
		fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
	}
}
/*
 * Reject sequence with ELS LS_RJT.
 * If this fails due to allocation or transmit congestion, assume the
 * originator will repeat the sequence.
 */
static void fc_seq_ls_rjt(struct fc_seq *req_sp, enum fc_els_rjt_reason reason,
			  enum fc_els_rjt_explan explan)
{
	struct fc_seq *sp;
	struct fc_els_ls_rjt *rjt;
	struct fc_frame *fp;

	sp = fc_seq_start_next(req_sp);
	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*rjt));
	if (fp) {
		rjt = fc_frame_payload_get(fp, sizeof(*rjt));
		memset(rjt, 0, sizeof(*rjt));
		rjt->er_cmd = ELS_LS_RJT;
		rjt->er_reason = reason;
		rjt->er_explan = explan;
		fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
	}
}
static void fc_exch_reset(struct fc_exch *ep)
{
	struct fc_seq *sp;
	void (*resp)(struct fc_seq *, struct fc_frame *, void *);
	void *arg;
	int rc = 1;

	spin_lock_bh(&ep->ex_lock);
	ep->state |= FC_EX_RST_CLEANUP;
	/*
	 * we really want to call del_timer_sync, but cannot due
	 * to the lport calling with the lport lock held (some resp
	 * functions can also grab the lport lock which could cause
	 * a deadlock).
	 */
	if (cancel_delayed_work(&ep->timeout_work))
		atomic_dec(&ep->ex_refcnt);	/* drop hold for timer */
	resp = ep->resp;
	ep->resp = NULL;
	if (ep->esb_stat & ESB_ST_REC_QUAL)
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec_qual */
	ep->esb_stat &= ~ESB_ST_REC_QUAL;
	arg = ep->arg;
	sp = &ep->seq;
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);

	if (resp)
		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
}
/**
 * fc_exch_pool_reset() - Reset a per-CPU exchange pool.
 * @lport: ptr to the local port
 * @pool: ptr to the per-CPU exchange pool
 * @sid: source FC ID
 * @did: destination FC ID
 *
 * Resets a per-CPU exchange pool, releasing all of its sequences
 * and exchanges. If sid is non-zero, reset only exchanges
 * we sourced from that FID. If did is non-zero, reset only
 * exchanges destined to that FID.
 */
static void fc_exch_pool_reset(struct fc_lport *lport,
			       struct fc_exch_pool *pool,
			       u32 sid, u32 did)
{
	struct fc_exch *ep;
	struct fc_exch *next;

	spin_lock_bh(&pool->lock);
restart:
	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
		if ((lport == ep->lp) &&
		    (sid == 0 || sid == ep->sid) &&
		    (did == 0 || did == ep->did)) {
			fc_exch_hold(ep);
			spin_unlock_bh(&pool->lock);

			fc_exch_reset(ep);

			fc_exch_release(ep);
			spin_lock_bh(&pool->lock);

			/*
			 * must restart loop in case, while the lock
			 * was dropped, multiple eps were released.
			 */
			goto restart;
		}
	}
	spin_unlock_bh(&pool->lock);
}
/**
 * fc_exch_mgr_reset() - Reset all EMs of a lport.
 * @lport: ptr to the local port
 * @sid: source FC ID
 * @did: destination FC ID
 *
 * Resets all EMs of a lport, releasing all of their sequences and
 * exchanges. If sid is non-zero, reset only exchanges
 * we sourced from that FID. If did is non-zero, reset only
 * exchanges destined to that FID.
 */
void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
{
	struct fc_exch_mgr_anchor *ema;
	unsigned int cpu;

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		for_each_possible_cpu(cpu)
			fc_exch_pool_reset(lport,
					   per_cpu_ptr(ema->mp->pool, cpu),
					   sid, did);
	}
}
EXPORT_SYMBOL(fc_exch_mgr_reset);
/*
 * Handle incoming ELS REC - Read Exchange Concise.
 * Note that the requesting port may be different than the S_ID in the request.
 */
static void fc_exch_els_rec(struct fc_seq *sp, struct fc_frame *rfp)
{
	struct fc_frame *fp;
	struct fc_exch *ep;
	struct fc_exch_mgr *em;
	struct fc_els_rec *rp;
	struct fc_els_rec_acc *acc;
	enum fc_els_rjt_reason reason = ELS_RJT_LOGIC;
	enum fc_els_rjt_explan explan;
	u32 sid;
	u16 rxid;
	u16 oxid;

	rp = fc_frame_payload_get(rfp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;
	sid = ntoh24(rp->rec_s_id);
	rxid = ntohs(rp->rec_rx_id);
	oxid = ntohs(rp->rec_ox_id);

	/*
	 * Currently it's hard to find the local S_ID from the exchange
	 * manager. This will eventually be fixed, but for now it's easier
	 * to lookup the subject exchange twice, once as if we were
	 * the initiator, and then again if we weren't.
	 */
	em = fc_seq_exch(sp)->em;
	ep = fc_exch_find(em, oxid);
	explan = ELS_EXPL_OXID_RXID;
	if (ep && ep->oid == sid) {
		if (ep->rxid != FC_XID_UNKNOWN &&
		    rxid != FC_XID_UNKNOWN &&
		    ep->rxid != rxid)
			goto rej;
	} else {
		if (ep)
			fc_exch_release(ep);
		ep = NULL;
		if (rxid != FC_XID_UNKNOWN)
			ep = fc_exch_find(em, rxid);
		if (!ep)
			goto reject;
	}

	fp = fc_frame_alloc(fc_seq_exch(sp)->lp, sizeof(*acc));
	if (!fp) {
		fc_exch_done(sp);
		goto out;
	}
	sp = fc_seq_start_next(sp);
	acc = fc_frame_payload_get(fp, sizeof(*acc));
	memset(acc, 0, sizeof(*acc));
	acc->reca_cmd = ELS_LS_ACC;
	acc->reca_ox_id = rp->rec_ox_id;
	memcpy(acc->reca_ofid, rp->rec_s_id, 3);
	acc->reca_rx_id = htons(ep->rxid);
	if (ep->sid == ep->oid)
		hton24(acc->reca_rfid, ep->did);
	else
		hton24(acc->reca_rfid, ep->sid);
	acc->reca_fc4value = htonl(ep->seq.rec_data);
	acc->reca_e_stat = htonl(ep->esb_stat & (ESB_ST_RESP |
						 ESB_ST_SEQ_INIT |
						 ESB_ST_COMPLETE));
	sp = fc_seq_start_next(sp);
	fc_seq_send_last(sp, fp, FC_RCTL_ELS_REP, FC_TYPE_ELS);
out:
	fc_exch_release(ep);
	fc_frame_free(rfp);
	return;

rej:
	fc_exch_release(ep);
reject:
	fc_seq_ls_rjt(sp, reason, explan);
	fc_frame_free(rfp);
}
/*
 * Handle response from RRQ.
 * Not much to do here, really.
 * Should report errors.
 *
 * TODO: fix error handler.
 */
static void fc_exch_rrq_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
{
	struct fc_exch *aborted_ep = arg;
	unsigned int op;

	if (IS_ERR(fp)) {
		int err = PTR_ERR(fp);

		if (err == -FC_EX_CLOSED || err == -FC_EX_TIMEOUT)
			goto cleanup;
		FC_EXCH_DBG(aborted_ep, "Cannot process RRQ, "
			    "frame error %d\n", err);
		return;
	}

	op = fc_frame_payload_op(fp);
	fc_frame_free(fp);

	switch (op) {
	case ELS_LS_RJT:
		FC_EXCH_DBG(aborted_ep, "LS_RJT for RRQ");
		/* fall through */
	case ELS_LS_ACC:
		goto cleanup;
	default:
		FC_EXCH_DBG(aborted_ep, "unexpected response op %x "
			    "for RRQ", op);
		return;
	}

cleanup:
	fc_exch_done(&aborted_ep->seq);
	/* drop hold for rec qual */
	fc_exch_release(aborted_ep);
}
/*
 * Send ELS RRQ - Reinstate Recovery Qualifier.
 * This tells the remote port to stop blocking the use of
 * the exchange and the seq_cnt range.
 */
static void fc_exch_rrq(struct fc_exch *ep)
{
	struct fc_lport *lp;
	struct fc_els_rrq *rrq;
	struct fc_frame *fp;
	u32 did;

	lp = ep->lp;

	fp = fc_frame_alloc(lp, sizeof(*rrq));
	if (!fp)
		goto retry;

	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
	memset(rrq, 0, sizeof(*rrq));
	rrq->rrq_cmd = ELS_RRQ;
	hton24(rrq->rrq_s_id, ep->sid);
	rrq->rrq_ox_id = htons(ep->oxid);
	rrq->rrq_rx_id = htons(ep->rxid);

	did = ep->did;
	if (ep->esb_stat & ESB_ST_RESP)
		did = ep->sid;

	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
		       fc_host_port_id(lp->host), FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	if (fc_exch_seq_send(lp, fp, fc_exch_rrq_resp, NULL, ep, lp->e_d_tov))
		return;

retry:
	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
		spin_unlock_bh(&ep->ex_lock);
		/* drop hold for rec qual */
		fc_exch_release(ep);
		return;
	}
	ep->esb_stat |= ESB_ST_REC_QUAL;
	fc_exch_timer_set_locked(ep, ep->r_a_tov);
	spin_unlock_bh(&ep->ex_lock);
}
/*
 * Handle incoming ELS RRQ - Reset Recovery Qualifier.
 */
static void fc_exch_els_rrq(struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_exch *ep;		/* request or subject exchange */
	struct fc_els_rrq *rp;
	u32 sid;
	u16 xid;
	enum fc_els_rjt_explan explan;

	rp = fc_frame_payload_get(fp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;

	/*
	 * lookup subject exchange.
	 */
	ep = fc_seq_exch(sp);
	sid = ntoh24(rp->rrq_s_id);		/* subject source */
	xid = ep->did == sid ? ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
	ep = fc_exch_find(ep->em, xid);

	explan = ELS_EXPL_OXID_RXID;
	if (!ep)
		goto reject;
	spin_lock_bh(&ep->ex_lock);
	if (ep->oxid != ntohs(rp->rrq_ox_id))
		goto unlock_reject;
	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
	    ep->rxid != FC_XID_UNKNOWN)
		goto unlock_reject;
	explan = ELS_EXPL_SID;
	if (ep->sid != sid)
		goto unlock_reject;

	/*
	 * Clear Recovery Qualifier state, and cancel timer if complete.
	 */
	if (ep->esb_stat & ESB_ST_REC_QUAL) {
		ep->esb_stat &= ~ESB_ST_REC_QUAL;
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
	}
	if (ep->esb_stat & ESB_ST_COMPLETE) {
		if (cancel_delayed_work(&ep->timeout_work))
			atomic_dec(&ep->ex_refcnt);	/* drop timer hold */
	}

	spin_unlock_bh(&ep->ex_lock);

	/*
	 * Send LS_ACC.
	 */
	fc_seq_ls_acc(sp);
	fc_frame_free(fp);
	fc_exch_release(ep);	/* drop hold from fc_exch_find */
	return;

unlock_reject:
	spin_unlock_bh(&ep->ex_lock);
	fc_exch_release(ep);	/* drop hold from fc_exch_find */
reject:
	fc_seq_ls_rjt(sp, ELS_RJT_LOGIC, explan);
	fc_frame_free(fp);
}
struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
					   struct fc_exch_mgr *mp,
					   bool (*match)(struct fc_frame *))
{
	struct fc_exch_mgr_anchor *ema;

	ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
	if (!ema)
		return ema;

	ema->mp = mp;
	ema->match = match;
	/* add EM anchor to EM anchors list */
	list_add_tail(&ema->ema_list, &lport->ema_list);
	kref_get(&mp->kref);
	return ema;
}
EXPORT_SYMBOL(fc_exch_mgr_add);
static void fc_exch_mgr_destroy(struct kref *kref)
{
	struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);

	mempool_destroy(mp->ep_pool);
	free_percpu(mp->pool);
	kfree(mp);
}
void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
{
	/* remove EM anchor from EM anchors list */
	list_del(&ema->ema_list);
	kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
	kfree(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_del);
struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
				      enum fc_class class,
				      u16 min_xid, u16 max_xid,
				      bool (*match)(struct fc_frame *))
{
	struct fc_exch_mgr *mp;
	u16 pool_exch_range;
	size_t pool_size;
	unsigned int cpu;
	struct fc_exch_pool *pool;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
	    (min_xid & fc_cpu_mask) != 0) {
		FC_LPORT_DBG(lp, "Invalid min_xid 0x%x and max_xid 0x%x\n",
			     min_xid, max_xid);
		return NULL;
	}

	/*
	 * allocate memory for EM
	 */
	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
	if (!mp)
		return NULL;

	mp->class = class;
	/* adjust em exch xid range for offload */
	mp->min_xid = min_xid;
	mp->max_xid = max_xid;

	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
	if (!mp->ep_pool)
		goto free_mp;

	/*
	 * Setup per cpu exch pool with entire exchange id range equally
	 * divided across all cpus. The exch pointers array memory is
	 * allocated for exch range per pool.
	 */
	pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
	mp->pool_max_index = pool_exch_range - 1;

	/*
	 * Allocate and initialize per cpu exch pool
	 */
	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
	if (!mp->pool)
		goto free_mempool;
	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(mp->pool, cpu);
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->ex_list);
	}

	kref_init(&mp->kref);
	if (!fc_exch_mgr_add(lp, mp, match)) {
		free_percpu(mp->pool);
		goto free_mempool;
	}

	/*
	 * Above kref_init() sets mp->kref to 1 and then
	 * call to fc_exch_mgr_add incremented mp->kref again,
	 * so adjust that extra increment.
	 */
	kref_put(&mp->kref, fc_exch_mgr_destroy);
	return mp;

free_mempool:
	mempool_destroy(mp->ep_pool);
free_mp:
	kfree(mp);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);
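
/*
 * Minimal usage sketch for fc_exch_mgr_alloc() (illustrative only; the xid
 * bounds below are hypothetical, not values mandated by libfc).  An LLD
 * typically allocates its EM once while setting up the lport:
 *
 *	struct fc_exch_mgr *mp;
 *
 *	mp = fc_exch_mgr_alloc(lport, FC_CLASS_3, 0x0010, 0x0fff, NULL);
 *	if (!mp)
 *		return -ENOMEM;
 *
 * Note that min_xid must have the fc_cpu_mask low bits clear, since those
 * bits of every xid carry the owning CPU, as checked above.
 */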
void fc_exch_mgr_free(struct fc_lport *lport)
{
	struct fc_exch_mgr_anchor *ema, *next;

	list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
		fc_exch_mgr_del(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_free);
struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
				struct fc_frame *fp,
				void (*resp)(struct fc_seq *,
					     struct fc_frame *fp,
					     void *arg),
				void (*destructor)(struct fc_seq *, void *),
				void *arg, u32 timer_msec)
{
	struct fc_exch *ep;
	struct fc_seq *sp = NULL;
	struct fc_frame_header *fh;
	int rc = 1;

	ep = fc_exch_alloc(lp, fp);
	if (!ep) {
		fc_frame_free(fp);
		return NULL;
	}
	ep->esb_stat |= ESB_ST_SEQ_INIT;
	fh = fc_frame_header_get(fp);
	fc_exch_set_addr(ep, ntoh24(fh->fh_s_id), ntoh24(fh->fh_d_id));
	ep->resp = resp;
	ep->destructor = destructor;
	ep->arg = arg;
	ep->r_a_tov = FC_DEF_R_A_TOV;
	ep->lp = lp;
	sp = &ep->seq;

	ep->fh_type = fh->fh_type; /* save for possible timeout handling */
	ep->f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
	sp->cnt++;

	if (ep->xid <= lp->lro_xid)
		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);

	if (unlikely(lp->tt.frame_send(lp, fp)))
		goto err;

	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);
	ep->f_ctl &= ~FC_FC_FIRST_SEQ;	/* not first seq */

	if (ep->f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
	spin_unlock_bh(&ep->ex_lock);
	return sp;
err:
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);
	if (!rc)
		fc_exch_delete(ep);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_seq_send);
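
/*
 * Minimal caller sketch for fc_exch_seq_send() (illustrative; my_resp and
 * my_arg are hypothetical names, not part of libfc).  The resp handler must
 * cope with ERR_PTR-encoded "frames", since the timeout and reset paths
 * deliver ERR_PTR(-FC_EX_TIMEOUT) and ERR_PTR(-FC_EX_CLOSED):
 *
 *	static void my_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
 *	{
 *		if (IS_ERR(fp))
 *			return;		(timeout or close; recover here)
 *		(process the payload)
 *		fc_frame_free(fp);
 *	}
 *
 *	sp = fc_exch_seq_send(lport, fp, my_resp, NULL, my_arg,
 *			      lport->e_d_tov);
 *	if (!sp)
 *		(the frame was freed; report the failure upward)
 */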
void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch_mgr_anchor *ema;
	u32 f_ctl, found = 0;
	u16 oxid;

	/* lport lock ? */
	if (!lp || lp->state == LPORT_ST_DISABLED) {
		FC_LPORT_DBG(lp, "Receiving frames for an lport that "
			     "has not been initialized correctly\n");
		fc_frame_free(fp);
		return;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	oxid = ntohs(fh->fh_ox_id);
	if (f_ctl & FC_FC_EX_CTX) {
		list_for_each_entry(ema, &lp->ema_list, ema_list) {
			if ((oxid >= ema->mp->min_xid) &&
			    (oxid <= ema->mp->max_xid)) {
				found = 1;
				break;
			}
		}

		if (!found) {
			FC_LPORT_DBG(lp, "Received response for out "
				     "of range oxid:%hx\n", oxid);
			fc_frame_free(fp);
			return;
		}
	} else
		ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list);

	/*
	 * If frame is marked invalid, just drop it.
	 */
	switch (fr_eof(fp)) {
	case FC_EOF_T:
		if (f_ctl & FC_FC_END_SEQ)
			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
		/* fall through */
	case FC_EOF_N:
		if (fh->fh_type == FC_TYPE_BLS)
			fc_exch_recv_bls(ema->mp, fp);
		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
			 FC_FC_EX_CTX)
			fc_exch_recv_seq_resp(ema->mp, fp);
		else if (f_ctl & FC_FC_SEQ_CTX)
			fc_exch_recv_resp(ema->mp, fp);
		else
			fc_exch_recv_req(lp, ema->mp, fp);
		break;
	default:
		FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp));
		fc_frame_free(fp);
	}
}
EXPORT_SYMBOL(fc_exch_recv);
int fc_exch_init(struct fc_lport *lp)
{
	if (!lp->tt.seq_start_next)
		lp->tt.seq_start_next = fc_seq_start_next;

	if (!lp->tt.exch_seq_send)
		lp->tt.exch_seq_send = fc_exch_seq_send;

	if (!lp->tt.seq_send)
		lp->tt.seq_send = fc_seq_send;

	if (!lp->tt.seq_els_rsp_send)
		lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;

	if (!lp->tt.exch_done)
		lp->tt.exch_done = fc_exch_done;

	if (!lp->tt.exch_mgr_reset)
		lp->tt.exch_mgr_reset = fc_exch_mgr_reset;

	if (!lp->tt.seq_exch_abort)
		lp->tt.seq_exch_abort = fc_seq_exch_abort;

	/*
	 * Initialize fc_cpu_mask and fc_cpu_order. The
	 * fc_cpu_mask is set for nr_cpu_ids rounded up
	 * to the next power of 2, and the order is stored
	 * in fc_cpu_order as this is later required for
	 * mapping between an exch id and the exch array index
	 * in the per cpu exch pool.
	 *
	 * This round up is required to align fc_cpu_mask
	 * to the exchange id's lower bits such that all incoming
	 * frames of an exchange get delivered to the same
	 * cpu on which the exchange originated, by a simple bitwise
	 * AND operation between fc_cpu_mask and the exchange id.
	 */
	fc_cpu_mask = 1;
	fc_cpu_order = 0;
	while (fc_cpu_mask < nr_cpu_ids) {
		fc_cpu_mask <<= 1;
		fc_cpu_order++;
	}
	fc_cpu_mask--;

	return 0;
}
EXPORT_SYMBOL(fc_exch_init);
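
/*
 * Worked example of the rounding above (illustrative values): with
 * nr_cpu_ids = 6, the loop stops at fc_cpu_mask = 8 with fc_cpu_order = 3,
 * and the final decrement leaves fc_cpu_mask = 7.  An exchange id then
 * carries the CPU in its low 3 bits, so fc_exch_find() recovers the pool
 * with (xid & fc_cpu_mask) and the slot with
 * (xid - mp->min_xid) >> fc_cpu_order.
 */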
int fc_setup_exch_mgr(void)
{
	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fc_em_cachep)
		return -ENOMEM;
	return 0;
}

void fc_destroy_exch_mgr(void)
{
	kmem_cache_destroy(fc_em_cachep);
}