/*
 * Copyright(c) 2007 Intel Corporation. All rights reserved.
 * Copyright(c) 2008 Red Hat, Inc. All rights reserved.
 * Copyright(c) 2008 Mike Christie
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * Fibre Channel exchange and sequence handling.
 */
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/log2.h>

#include <scsi/fc/fc_fc2.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>
u16	fc_cpu_mask;		/* cpu mask for possible cpus */
EXPORT_SYMBOL(fc_cpu_mask);
static u16	fc_cpu_order;	/* 2's power to represent total possible cpus */
static struct kmem_cache *fc_em_cachep;	       /* cache for exchanges */
static struct workqueue_struct *fc_exch_workqueue;
/*
 * Structure and function definitions for managing Fibre Channel Exchanges.
 *
 * The three primary structures used here are fc_exch_mgr, fc_exch, and fc_seq.
 *
 * fc_exch_mgr holds the exchange state for an N port
 *
 * fc_exch holds state for one exchange and links to its active sequence.
 *
 * fc_seq holds the state for an individual sequence.
 */
/**
 * struct fc_exch_pool - Per cpu exchange pool
 * @next_index:   Next possible free exchange index
 * @total_exches: Total allocated exchanges
 * @lock:         Exch pool lock
 * @ex_list:      List of exchanges
 *
 * This structure manages per cpu exchanges in array of exchange pointers.
 * This array is allocated followed by struct fc_exch_pool memory for
 * assigned range of exchanges to per cpu pool.
 */
struct fc_exch_pool {
	spinlock_t	 lock;
	struct list_head ex_list;
	u16		 next_index;
	u16		 total_exches;

	/* two caches of free slots in the exch array */
	u16		 left;
	u16		 right;
} ____cacheline_aligned_in_smp;
/**
 * struct fc_exch_mgr - The Exchange Manager (EM).
 * @class:          Default class for new sequences
 * @kref:           Reference counter
 * @min_xid:        Minimum exchange ID
 * @max_xid:        Maximum exchange ID
 * @ep_pool:        Reserved exchange pointers
 * @pool_max_index: Max exch array index in exch pool
 * @pool:           Per cpu exch pool
 * @stats:          Statistics structure
 *
 * This structure is the center for creating exchanges and sequences.
 * It manages the allocation of exchange IDs.
 */
struct fc_exch_mgr {
	struct fc_exch_pool __percpu *pool;
	mempool_t	*ep_pool;
	struct fc_lport	*lport;
	enum fc_class	class;
	struct kref	kref;
	u16		min_xid;
	u16		max_xid;
	u16		pool_max_index;

	struct {
		atomic_t no_free_exch;
		atomic_t no_free_exch_xid;
		atomic_t xid_not_found;
		atomic_t xid_busy;
		atomic_t seq_not_found;
		atomic_t non_bls_resp;
	} stats;
};
/**
 * struct fc_exch_mgr_anchor - primary structure for list of EMs
 * @ema_list: Exchange Manager Anchor list
 * @mp:       Exchange Manager associated with this anchor
 * @match:    Routine to determine if this anchor's EM should be used
 *
 * When walking the list of anchors the match routine will be called
 * for each anchor to determine if that EM should be used. The last
 * anchor in the list will always match to handle any exchanges not
 * handled by other EMs. The non-default EMs would be added to the
 * anchor list by HW that provides offloads.
 */
struct fc_exch_mgr_anchor {
	struct list_head ema_list;
	struct fc_exch_mgr *mp;
	bool (*match)(struct fc_frame *);
};
static void fc_exch_rrq(struct fc_exch *);
static void fc_seq_ls_acc(struct fc_frame *);
static void fc_seq_ls_rjt(struct fc_frame *, enum fc_els_rjt_reason,
			  enum fc_els_rjt_explan);
static void fc_exch_els_rec(struct fc_frame *);
static void fc_exch_els_rrq(struct fc_frame *);
/*
 * Internal implementation notes.
 *
 * The exchange manager is one by default in libfc but LLD may choose
 * to have one per CPU. The sequence manager is one per exchange manager
 * and currently never separated.
 *
 * Section 9.8 in FC-FS-2 specifies:  "The SEQ_ID is a one-byte field
 * assigned by the Sequence Initiator that shall be unique for a specific
 * D_ID and S_ID pair while the Sequence is open."  Note that it isn't
 * qualified by exchange ID, which one might think it would be.
 * In practice this limits the number of open sequences and exchanges to 256
 * per session.  For most targets we could treat this limit as per exchange.
 *
 * The exchange and its sequence are freed when the last sequence is received.
 * It's possible for the remote port to leave an exchange open without
 * sending any sequences.
 *
 * Notes on reference counts:
 *
 * Exchanges are reference counted and an exchange is freed when its
 * reference count becomes zero.
 *
 * Timeouts:
 * Sequences are timed out for E_D_TOV and R_A_TOV.
 *
 * Sequence event handling:
 *
 * The following events may occur on initiator sequences:
 *
 *	Send.
 *	    For now, the whole thing is sent.
 *	Receive ACK.
 *	    This applies only to class F.
 *	    The sequence is marked complete.
 *	ULP completion.
 *	    The upper layer calls fc_exch_done() when done
 *	    with exchange and sequence tuple.
 *	RX-inferred completion.
 *	    When we receive the next sequence on the same exchange, we can
 *	    retire the previous sequence ID.  (XXX not implemented).
 *	Timeout.
 *	    R_A_TOV frees the sequence ID.  If we're waiting for ACK,
 *	    E_D_TOV causes abort and calls upper layer response handler
 *	    with FC_EX_TIMEOUT error.
 *
 * The following events may occur on recipient sequences:
 *
 *	Receive.
 *	    Allocate sequence for first frame received.
 *	    Hold during receive handler.
 *	    Release when final frame received.
 *	    Keep status of last N of these for the ELS RES command.  XXX TBD.
 *	Receive ABTS.
 *	    Deallocate sequence.
 *
 * For now, we neglect conditions where only part of a sequence was
 * received or transmitted, or where out-of-order receipt is detected.
 *
 * Locking notes:
 *
 * The EM code runs in a per-CPU worker thread.
 *
 * To protect against concurrency between a worker thread code and timers,
 * sequence allocation and deallocation must be locked.
 *  - exchange refcnt can be done atomically without locks.
 *  - sequence allocation must be locked by exch lock.
 *  - If the EM pool lock and ex_lock must be taken at the same time, then the
 *    EM pool lock must be taken before the ex_lock.
 */
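/*
 * A minimal sketch of the lock-ordering rule above, assuming a path that
 * needs both the per-CPU pool lock and an exchange lock (names as used
 * elsewhere in this file):
 *
 *	spin_lock_bh(&pool->lock);	// EM pool lock first
 *	spin_lock_bh(&ep->ex_lock);	// then the exchange lock
 *	...
 *	spin_unlock_bh(&ep->ex_lock);
 *	spin_unlock_bh(&pool->lock);	// release in reverse order
 */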
/*
 * opcode names for debugging.
 */
static char *fc_exch_rctl_names[] = FC_RCTL_NAMES_INIT;
/**
 * fc_exch_name_lookup() - Lookup name by opcode
 * @op:        Opcode to be looked up
 * @table:     Opcode/name table
 * @max_index: Index not to be exceeded
 *
 * This routine is used to determine a human-readable string identifying
 * a R_CTL opcode.
 */
static inline const char *fc_exch_name_lookup(unsigned int op, char **table,
					      unsigned int max_index)
{
	const char *name = NULL;

	if (op < max_index)
		name = table[op];
	if (!name)
		name = "unknown";
	return name;
}

/**
 * fc_exch_rctl_name() - Wrapper routine for fc_exch_name_lookup()
 * @op: The opcode to be looked up
 */
static const char *fc_exch_rctl_name(unsigned int op)
{
	return fc_exch_name_lookup(op, fc_exch_rctl_names,
				   ARRAY_SIZE(fc_exch_rctl_names));
}
/**
 * fc_exch_hold() - Increment an exchange's reference count
 * @ep: Exchange to be held
 */
static inline void fc_exch_hold(struct fc_exch *ep)
{
	atomic_inc(&ep->ex_refcnt);
}
/**
 * fc_exch_setup_hdr() - Initialize a FC header by initializing some fields
 *			 and determine SOF and EOF.
 * @ep:    The exchange that will use the header
 * @fp:    The frame whose header is to be modified
 * @f_ctl: F_CTL bits that will be used for the frame header
 *
 * The fields initialized by this routine are: fh_ox_id, fh_rx_id,
 * fh_seq_id, fh_seq_cnt and the SOF and EOF.
 */
static void fc_exch_setup_hdr(struct fc_exch *ep, struct fc_frame *fp,
			      u32 f_ctl)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	u16 fill;

	fr_sof(fp) = ep->class;
	if (ep->seq.cnt)
		fr_sof(fp) = fc_sof_normal(ep->class);

	if (f_ctl & FC_FC_END_SEQ) {
		fr_eof(fp) = FC_EOF_T;
		if (fc_sof_needs_ack(ep->class))
			fr_eof(fp) = FC_EOF_N;
		/*
		 * The number of fill bytes to make the length a 4-byte
		 * multiple is the low order 2-bits of the f_ctl.
		 * The fill itself will have been cleared by the frame
		 * allocation.
		 * After this, the length will be even, as expected by
		 * the transport.
		 */
		fill = fr_len(fp) & 3;
		if (fill) {
			fill = 4 - fill;
			/* TODO, this may be a problem with fragmented skb */
			skb_put(fp_skb(fp), fill);
			hton24(fh->fh_f_ctl, f_ctl | fill);
		}
	} else {
		WARN_ON(fr_len(fp) % 4 != 0);	/* no pad to non last frame */
		fr_eof(fp) = FC_EOF_N;
	}

	/* Initialize remaining fh fields from fc_fill_fc_hdr */
	fh->fh_ox_id = htons(ep->oxid);
	fh->fh_rx_id = htons(ep->rxid);
	fh->fh_seq_id = ep->seq.id;
	fh->fh_seq_cnt = htons(ep->seq.cnt);
}
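/*
 * Worked example of the fill-byte logic above (illustrative lengths only):
 * if fr_len(fp) is 37 bytes, then fr_len(fp) & 3 == 1, so 4 - 1 = 3 fill
 * bytes are appended with skb_put() and recorded in the low two bits of
 * F_CTL, making the transmitted length 40 bytes, a 4-byte multiple again.
 */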
/**
 * fc_exch_release() - Decrement an exchange's reference count
 * @ep: Exchange to be released
 *
 * If the reference count reaches zero and the exchange is complete,
 * it is freed.
 */
static void fc_exch_release(struct fc_exch *ep)
{
	struct fc_exch_mgr *mp;

	if (atomic_dec_and_test(&ep->ex_refcnt)) {
		mp = ep->em;
		if (ep->destructor)
			ep->destructor(&ep->seq, ep->arg);
		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
		mempool_free(ep, mp->ep_pool);
	}
}
/**
 * fc_exch_timer_cancel() - cancel exch timer
 * @ep: The exchange whose timer is to be canceled
 */
static inline void fc_exch_timer_cancel(struct fc_exch *ep)
{
	if (cancel_delayed_work(&ep->timeout_work)) {
		FC_EXCH_DBG(ep, "Exchange timer canceled\n");
		atomic_dec(&ep->ex_refcnt); /* drop hold for timer */
	}
}
/**
 * fc_exch_timer_set_locked() - Start a timer for an exchange with the
 *				exchange lock held
 * @ep:         The exchange whose timer will start
 * @timer_msec: The timeout period
 *
 * Used for upper level protocols to time out the exchange.
 * The timer is cancelled when it fires or when the exchange completes.
 */
static inline void fc_exch_timer_set_locked(struct fc_exch *ep,
					    unsigned int timer_msec)
{
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		return;

	FC_EXCH_DBG(ep, "Exchange timer armed : %d msecs\n", timer_msec);

	fc_exch_hold(ep);		/* hold for timer */
	if (!queue_delayed_work(fc_exch_workqueue, &ep->timeout_work,
				msecs_to_jiffies(timer_msec))) {
		FC_EXCH_DBG(ep, "Exchange already queued\n");
		fc_exch_release(ep);
	}
}
/**
 * fc_exch_timer_set() - Lock the exchange and set the timer
 * @ep:         The exchange whose timer will start
 * @timer_msec: The timeout period
 */
static void fc_exch_timer_set(struct fc_exch *ep, unsigned int timer_msec)
{
	spin_lock_bh(&ep->ex_lock);
	fc_exch_timer_set_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
}
/**
 * fc_exch_done_locked() - Complete an exchange with the exchange lock held
 * @ep: The exchange that is complete
 *
 * Note: May sleep if invoked from outside a response handler.
 */
static int fc_exch_done_locked(struct fc_exch *ep)
{
	int rc = 1;

	/*
	 * We must check for completion in case there are two threads
	 * trying to complete this. But the rrq code will reuse the
	 * ep, and in that case we only clear the resp and set it as
	 * complete, so it can be reused by the timer to send the rrq.
	 */
	if (ep->state & FC_EX_DONE)
		return rc;
	ep->esb_stat |= ESB_ST_COMPLETE;

	if (!(ep->esb_stat & ESB_ST_REC_QUAL)) {
		ep->state |= FC_EX_DONE;
		fc_exch_timer_cancel(ep);
		rc = 0;
	}
	return rc;
}

static struct fc_exch fc_quarantine_exch;
/**
 * fc_exch_ptr_get() - Return an exchange from an exchange pool
 * @pool:  Exchange Pool to get an exchange from
 * @index: Index of the exchange within the pool
 *
 * Use the index to get an exchange from within an exchange pool. exches
 * will point to an array of exchange pointers. The index will select
 * the exchange within the array.
 */
static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
					      u16 index)
{
	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
	return exches[index];
}
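/*
 * Memory layout assumed by fc_exch_ptr_get()/fc_exch_ptr_set() (sketch):
 * each per-CPU pool is allocated as
 *
 *	[ struct fc_exch_pool ][ struct fc_exch *exches[0 .. pool_max_index] ]
 *
 * so (pool + 1) is the first element of the exchange pointer array and the
 * index simply selects a slot within that array.
 */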
/**
 * fc_exch_ptr_set() - Assign an exchange to a slot in an exchange pool
 * @pool:  The pool to assign the exchange to
 * @index: The index in the pool where the exchange will be assigned
 * @ep:    The exchange to assign to the pool
 */
static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
				   struct fc_exch *ep)
{
	((struct fc_exch **)(pool + 1))[index] = ep;
}
/**
 * fc_exch_delete() - Delete an exchange
 * @ep: The exchange to be deleted
 */
static void fc_exch_delete(struct fc_exch *ep)
{
	struct fc_exch_pool *pool;
	u16 index;

	pool = ep->pool;
	spin_lock_bh(&pool->lock);
	WARN_ON(pool->total_exches <= 0);
	pool->total_exches--;

	/* update cache of free slot */
	index = (ep->xid - ep->em->min_xid) >> fc_cpu_order;
	if (!(ep->state & FC_EX_QUARANTINE)) {
		if (pool->left == FC_XID_UNKNOWN)
			pool->left = index;
		else if (pool->right == FC_XID_UNKNOWN)
			pool->right = index;
		else
			pool->next_index = index;
		fc_exch_ptr_set(pool, index, NULL);
	} else {
		fc_exch_ptr_set(pool, index, &fc_quarantine_exch);
	}
	list_del(&ep->ex_list);
	spin_unlock_bh(&pool->lock);
	fc_exch_release(ep);	/* drop hold for exch in mp */
}
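/*
 * Example of the free-slot caching above (illustrative): when the exchange
 * occupying pool index 7 is deleted, 7 is remembered in pool->left (or
 * pool->right if left is already taken), so the next fc_exch_em_alloc() on
 * this CPU can reuse slot 7 directly instead of scanning forward from
 * pool->next_index.
 */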
static int fc_seq_send_locked(struct fc_lport *lport, struct fc_seq *sp,
			      struct fc_frame *fp)
{
	struct fc_exch *ep;
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	int error = -ENXIO;
	u32 f_ctl;
	u8 fh_type = fh->fh_type;

	ep = fc_seq_exch(sp);

	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL)) {
		fc_frame_free(fp);
		goto out;
	}

	WARN_ON(!(ep->esb_stat & ESB_ST_SEQ_INIT));

	f_ctl = ntoh24(fh->fh_f_ctl);
	fc_exch_setup_hdr(ep, fp, f_ctl);
	fr_encaps(fp) = ep->encaps;

	/*
	 * update sequence count if this frame is carrying
	 * multiple FC frames when sequence offload is enabled
	 * by LLD.
	 */
	if (fr_max_payload(fp))
		sp->cnt += DIV_ROUND_UP((fr_len(fp) - sizeof(*fh)),
					fr_max_payload(fp));
	else
		sp->cnt++;

	/*
	 * Send the frame.
	 */
	error = lport->tt.frame_send(lport, fp);

	if (fh_type == FC_TYPE_BLS)
		goto out;

	/*
	 * Update the exchange and sequence flags,
	 * assuming all frames for the sequence have been sent.
	 * We can only be called to send once for each sequence.
	 */
	ep->f_ctl = f_ctl & ~FC_FC_FIRST_SEQ;	/* not first seq */
	if (f_ctl & FC_FC_SEQ_INIT)
		ep->esb_stat &= ~ESB_ST_SEQ_INIT;
out:
	return error;
}
/**
 * fc_seq_send() - Send a frame using existing sequence/exchange pair
 * @lport: The local port that the exchange will be sent on
 * @sp:    The sequence to be sent
 * @fp:    The frame to be sent on the exchange
 *
 * Note: The frame will be freed either by a direct call to fc_frame_free(fp)
 * or indirectly by calling libfc_function_template.frame_send().
 */
int fc_seq_send(struct fc_lport *lport, struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_exch *ep;
	int error;

	ep = fc_seq_exch(sp);
	spin_lock_bh(&ep->ex_lock);
	error = fc_seq_send_locked(lport, sp, fp);
	spin_unlock_bh(&ep->ex_lock);
	return error;
}
EXPORT_SYMBOL(fc_seq_send);
/**
 * fc_seq_alloc() - Allocate a sequence for a given exchange
 * @ep:     The exchange to allocate a new sequence for
 * @seq_id: The sequence ID to be used
 *
 * We don't support multiple originated sequences on the same exchange.
 * By implication, any previously originated sequence on this exchange
 * is complete, and we reallocate the same sequence.
 */
static struct fc_seq *fc_seq_alloc(struct fc_exch *ep, u8 seq_id)
{
	struct fc_seq *sp;

	sp = &ep->seq;
	sp->ssb_stat = 0;
	sp->cnt = 0;
	sp->id = seq_id;
	return sp;
}
/**
 * fc_seq_start_next_locked() - Allocate a new sequence on the same
 *				exchange as the supplied sequence
 * @sp: The sequence/exchange to get a new sequence for
 */
static struct fc_seq *fc_seq_start_next_locked(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	sp = fc_seq_alloc(ep, ep->seq_id++);
	FC_EXCH_DBG(ep, "f_ctl %6x seq %2x\n",
		    ep->f_ctl, sp->id);
	return sp;
}
/**
 * fc_seq_start_next() - Lock the exchange and get a new sequence
 *			 for a given sequence/exchange pair
 * @sp: The sequence/exchange to get a new sequence for
 */
struct fc_seq *fc_seq_start_next(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);

	spin_lock_bh(&ep->ex_lock);
	sp = fc_seq_start_next_locked(sp);
	spin_unlock_bh(&ep->ex_lock);

	return sp;
}
EXPORT_SYMBOL(fc_seq_start_next);
/*
 * Set the response handler for the exchange associated with a sequence.
 *
 * Note: May sleep if invoked from outside a response handler.
 */
void fc_seq_set_resp(struct fc_seq *sp,
		     void (*resp)(struct fc_seq *, struct fc_frame *, void *),
		     void *arg)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	DEFINE_WAIT(wait);

	spin_lock_bh(&ep->ex_lock);
	while (ep->resp_active && ep->resp_task != current) {
		prepare_to_wait(&ep->resp_wq, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock_bh(&ep->ex_lock);

		schedule();

		spin_lock_bh(&ep->ex_lock);
	}
	finish_wait(&ep->resp_wq, &wait);
	ep->resp = resp;
	ep->arg = arg;
	spin_unlock_bh(&ep->ex_lock);
}
EXPORT_SYMBOL(fc_seq_set_resp);
/**
 * fc_exch_abort_locked() - Abort an exchange
 * @ep:         The exchange to be aborted
 * @timer_msec: The period of time to wait before aborting
 *
 * Abort an exchange and sequence. Generally called because of an
 * exchange timeout or an abort from the upper layer.
 *
 * A timer_msec can be specified for the abort timeout; if a non-zero
 * timer_msec value is specified then the exchange resp handler
 * will be called with a timeout error if there is no response to the abort.
 *
 * Locking notes: Called with exch lock held
 *
 * Return value: 0 on success else error code
 */
static int fc_exch_abort_locked(struct fc_exch *ep,
				unsigned int timer_msec)
{
	struct fc_seq *sp;
	struct fc_frame *fp;
	int error;

	FC_EXCH_DBG(ep, "exch: abort, time %d msecs\n", timer_msec);
	if (ep->esb_stat & (ESB_ST_COMPLETE | ESB_ST_ABNORMAL) ||
	    ep->state & (FC_EX_DONE | FC_EX_RST_CLEANUP)) {
		FC_EXCH_DBG(ep, "exch: already completed esb %x state %x\n",
			    ep->esb_stat, ep->state);
		return -ENXIO;
	}

	/*
	 * Send the abort on a new sequence if possible.
	 */
	sp = fc_seq_start_next_locked(&ep->seq);
	if (!sp)
		return -ENOMEM;

	if (timer_msec)
		fc_exch_timer_set_locked(ep, timer_msec);

	if (ep->sid) {
		/*
		 * Send an abort for the sequence that timed out.
		 */
		fp = fc_frame_alloc(ep->lp, 0);
		if (fp) {
			ep->esb_stat |= ESB_ST_SEQ_INIT;
			fc_fill_fc_hdr(fp, FC_RCTL_BA_ABTS, ep->did, ep->sid,
				       FC_TYPE_BLS, FC_FC_END_SEQ |
				       FC_FC_SEQ_INIT, 0);
			error = fc_seq_send_locked(ep->lp, sp, fp);
		} else {
			error = -ENOBUFS;
		}
	} else {
		/*
		 * If not logged into the fabric, don't send ABTS but leave
		 * sequence active until next timeout.
		 */
		error = 0;
	}
	ep->esb_stat |= ESB_ST_ABNORMAL;
	return error;
}
/**
 * fc_seq_exch_abort() - Abort an exchange and sequence
 * @req_sp:     The sequence to be aborted
 * @timer_msec: The period of time to wait before aborting
 *
 * Generally called because of a timeout or an abort from the upper layer.
 *
 * Return value: 0 on success else error code
 */
int fc_seq_exch_abort(const struct fc_seq *req_sp, unsigned int timer_msec)
{
	struct fc_exch *ep;
	int error;

	ep = fc_seq_exch(req_sp);
	spin_lock_bh(&ep->ex_lock);
	error = fc_exch_abort_locked(ep, timer_msec);
	spin_unlock_bh(&ep->ex_lock);
	return error;
}
/**
 * fc_invoke_resp() - invoke ep->resp()
 * @ep: The exchange whose response handler is invoked
 * @sp: The sequence the frame belongs to
 * @fp: The received frame or an ERR_PTR() error code
 *
 * It is assumed that after initialization finished (this means the
 * first unlock of ex_lock after fc_exch_alloc()) ep->resp and ep->arg are
 * modified only via fc_seq_set_resp(). This guarantees that none of these
 * two variables changes if ep->resp_active > 0.
 *
 * If an fc_seq_set_resp() call is busy modifying ep->resp and ep->arg when
 * this function is invoked, the first spin_lock_bh() call in this function
 * will wait until fc_seq_set_resp() has finished modifying these variables.
 *
 * Since fc_exch_done() invokes fc_seq_set_resp() it is guaranteed that
 * ep->resp() won't be invoked after fc_exch_done() has returned.
 *
 * The response handler itself may invoke fc_exch_done(), which will clear the
 * ep->resp pointer.
 *
 * Returns true if and only if ep->resp has been invoked.
 */
static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
			   struct fc_frame *fp)
{
	void (*resp)(struct fc_seq *, struct fc_frame *fp, void *arg);
	void *arg;
	bool res = false;

	spin_lock_bh(&ep->ex_lock);
	ep->resp_active++;
	if (ep->resp_task != current)
		ep->resp_task = !ep->resp_task ? current : NULL;
	resp = ep->resp;
	arg = ep->arg;
	spin_unlock_bh(&ep->ex_lock);

	if (resp) {
		resp(sp, fp, arg);
		res = true;
	}

	spin_lock_bh(&ep->ex_lock);
	if (--ep->resp_active == 0)
		ep->resp_task = NULL;
	spin_unlock_bh(&ep->ex_lock);

	if (ep->resp_active == 0)
		wake_up(&ep->resp_wq);

	return res;
}
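/*
 * Usage sketch of the resp handshake implemented by fc_seq_set_resp() and
 * fc_invoke_resp(); the handler and argument names are hypothetical:
 *
 *	fc_seq_set_resp(sp, my_resp_handler, my_arg);
 *	...
 *	fc_exch_done(sp);	// clears the handler via fc_seq_set_resp()
 *
 * fc_seq_set_resp() blocks while fc_invoke_resp() is running on another
 * task, so my_resp_handler() cannot be entered after fc_exch_done() returns.
 */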
/**
 * fc_exch_timeout() - Handle exchange timer expiration
 * @work: The work_struct identifying the exchange that timed out
 */
static void fc_exch_timeout(struct work_struct *work)
{
	struct fc_exch *ep = container_of(work, struct fc_exch,
					  timeout_work.work);
	struct fc_seq *sp = &ep->seq;
	u32 e_stat;
	int rc = 1;

	FC_EXCH_DBG(ep, "Exchange timed out state %x\n", ep->state);

	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
		goto unlock;

	e_stat = ep->esb_stat;
	if (e_stat & ESB_ST_COMPLETE) {
		ep->esb_stat = e_stat & ~ESB_ST_REC_QUAL;
		spin_unlock_bh(&ep->ex_lock);
		if (e_stat & ESB_ST_REC_QUAL)
			fc_exch_rrq(ep);
		goto done;
	} else {
		if (e_stat & ESB_ST_ABNORMAL)
			rc = fc_exch_done_locked(ep);
		spin_unlock_bh(&ep->ex_lock);
		if (!rc)
			fc_exch_delete(ep);
		fc_invoke_resp(ep, sp, ERR_PTR(-FC_EX_TIMEOUT));
		fc_seq_set_resp(sp, NULL, ep->arg);
		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
		goto done;
	}
unlock:
	spin_unlock_bh(&ep->ex_lock);
done:
	/*
	 * This release matches the hold taken when the timer was set.
	 */
	fc_exch_release(ep);
}
/**
 * fc_exch_em_alloc() - Allocate an exchange from a specified EM.
 * @lport: The local port that the exchange is for
 * @mp:    The exchange manager that will allocate the exchange
 *
 * Returns pointer to allocated fc_exch with exch lock held.
 */
static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
					struct fc_exch_mgr *mp)
{
	struct fc_exch *ep;
	unsigned int cpu;
	u16 index;
	struct fc_exch_pool *pool;

	/* allocate memory for exchange */
	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
	if (!ep) {
		atomic_inc(&mp->stats.no_free_exch);
		goto out;
	}
	memset(ep, 0, sizeof(*ep));

	cpu = get_cpu();
	pool = per_cpu_ptr(mp->pool, cpu);
	spin_lock_bh(&pool->lock);
	put_cpu();

	/* peek cache of free slot */
	if (pool->left != FC_XID_UNKNOWN) {
		if (!WARN_ON(fc_exch_ptr_get(pool, pool->left))) {
			index = pool->left;
			pool->left = FC_XID_UNKNOWN;
			goto hit;
		}
	}

	if (pool->right != FC_XID_UNKNOWN) {
		if (!WARN_ON(fc_exch_ptr_get(pool, pool->right))) {
			index = pool->right;
			pool->right = FC_XID_UNKNOWN;
			goto hit;
		}
	}

	index = pool->next_index;
	/* allocate new exch from pool */
	while (fc_exch_ptr_get(pool, index)) {
		index = index == mp->pool_max_index ? 0 : index + 1;
		if (index == pool->next_index)
			goto err_pool;
	}
	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
hit:
	fc_exch_hold(ep);	/* hold for exch in mp */
	spin_lock_init(&ep->ex_lock);
	/*
	 * Hold exch lock for caller to prevent fc_exch_reset()
	 * from releasing exch while fc_exch_alloc() caller is
	 * still working on exch.
	 */
	spin_lock_bh(&ep->ex_lock);

	fc_exch_ptr_set(pool, index, ep);
	list_add_tail(&ep->ex_list, &pool->ex_list);
	fc_seq_alloc(ep, ep->seq_id++);
	pool->total_exches++;
	spin_unlock_bh(&pool->lock);

	/*
	 * update exchange
	 */
	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
	ep->em = mp;
	ep->pool = pool;
	ep->lp = lport;
	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
	ep->rxid = FC_XID_UNKNOWN;
	ep->class = mp->class;
	ep->resp_active = 0;
	init_waitqueue_head(&ep->resp_wq);
	INIT_DELAYED_WORK(&ep->timeout_work, fc_exch_timeout);
out:
	return ep;
err_pool:
	spin_unlock_bh(&pool->lock);
	atomic_inc(&mp->stats.no_free_exch_xid);
	mempool_free(ep, mp->ep_pool);
	return NULL;
}
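/*
 * Worked example of the XID encoding above (illustrative values): with
 * fc_cpu_order = 2 (so fc_cpu_mask = 3), min_xid = 0x100, cpu = 1 and pool
 * index = 3, the new exchange gets
 *
 *	xid = (3 << 2 | 1) + 0x100 = 0x10d
 *
 * and fc_exch_find() reverses the mapping with cpu = xid & fc_cpu_mask and
 * index = (xid - min_xid) >> fc_cpu_order.
 */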
/**
 * fc_exch_alloc() - Allocate an exchange from an EM on a
 *		     local port's list of EMs.
 * @lport: The local port that will own the exchange
 * @fp:    The FC frame that the exchange will be for
 *
 * This function walks the list of exchange manager(EM)
 * anchors to select an EM for a new exchange allocation. The
 * EM is selected when a NULL match function pointer is encountered
 * or when a call to a match function returns true.
 */
static struct fc_exch *fc_exch_alloc(struct fc_lport *lport,
				     struct fc_frame *fp)
{
	struct fc_exch_mgr_anchor *ema;
	struct fc_exch *ep;

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		if (!ema->match || ema->match(fp)) {
			ep = fc_exch_em_alloc(lport, ema->mp);
			if (ep)
				return ep;
		}
	}
	return NULL;
}
/**
 * fc_exch_find() - Lookup and hold an exchange
 * @mp:  The exchange manager to lookup the exchange from
 * @xid: The XID of the exchange to look up
 */
static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
{
	struct fc_lport *lport = mp->lport;
	struct fc_exch_pool *pool;
	struct fc_exch *ep = NULL;
	u16 cpu = xid & fc_cpu_mask;

	if (xid == FC_XID_UNKNOWN)
		return NULL;

	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		pr_err("host%u: lport %6.6x: xid %d invalid CPU %d\n:",
		       lport->host->host_no, lport->port_id, xid, cpu);
		return NULL;
	}

	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
		pool = per_cpu_ptr(mp->pool, cpu);
		spin_lock_bh(&pool->lock);
		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
		if (ep == &fc_quarantine_exch) {
			FC_LPORT_DBG(lport, "xid %x quarantined\n", xid);
			ep = NULL;
			goto out;
		}
		if (ep) {
			WARN_ON(ep->xid != xid);
			fc_exch_hold(ep);
		}
out:
		spin_unlock_bh(&pool->lock);
	}
	return ep;
}
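/*
 * Note on fc_quarantine_exch (summary of the behaviour above): when a
 * quarantined exchange is deleted, fc_exch_delete() parks
 * &fc_quarantine_exch in the pool slot instead of NULL, so the slot is never
 * handed out again by fc_exch_em_alloc() and fc_exch_find() treats the XID
 * as not found, preventing reuse of an XID that may still be in flight.
 */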
/**
 * fc_exch_done() - Indicate that an exchange/sequence tuple is complete and
 *		    the memory allocated for the related objects may be freed.
 * @sp: The sequence that has completed
 *
 * Note: May sleep if invoked from outside a response handler.
 */
void fc_exch_done(struct fc_seq *sp)
{
	struct fc_exch *ep = fc_seq_exch(sp);
	int rc;

	spin_lock_bh(&ep->ex_lock);
	rc = fc_exch_done_locked(ep);
	spin_unlock_bh(&ep->ex_lock);

	fc_seq_set_resp(sp, NULL, ep->arg);
	if (!rc)
		fc_exch_delete(ep);
}
EXPORT_SYMBOL(fc_exch_done);
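/*
 * Usage sketch (handler name is hypothetical): a response handler that has
 * seen the last frame it cares about typically finishes the exchange like
 * this, after which neither the handler nor its argument is referenced again:
 *
 *	static void my_resp(struct fc_seq *sp, struct fc_frame *fp, void *arg)
 *	{
 *		if (!IS_ERR(fp))
 *			fc_frame_free(fp);
 *		fc_exch_done(sp);
 *	}
 */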
992 * fc_exch_resp() - Allocate a new exchange for a response frame
993 * @lport: The local port that the exchange was for
994 * @mp: The exchange manager to allocate the exchange from
995 * @fp: The response frame
997 * Sets the responder ID in the frame header.
999 static struct fc_exch
*fc_exch_resp(struct fc_lport
*lport
,
1000 struct fc_exch_mgr
*mp
,
1001 struct fc_frame
*fp
)
1004 struct fc_frame_header
*fh
;
1006 ep
= fc_exch_alloc(lport
, fp
);
1008 ep
->class = fc_frame_class(fp
);
1011 * Set EX_CTX indicating we're responding on this exchange.
1013 ep
->f_ctl
|= FC_FC_EX_CTX
; /* we're responding */
1014 ep
->f_ctl
&= ~FC_FC_FIRST_SEQ
; /* not new */
1015 fh
= fc_frame_header_get(fp
);
1016 ep
->sid
= ntoh24(fh
->fh_d_id
);
1017 ep
->did
= ntoh24(fh
->fh_s_id
);
1021 * Allocated exchange has placed the XID in the
1022 * originator field. Move it to the responder field,
1023 * and set the originator XID from the frame.
1026 ep
->oxid
= ntohs(fh
->fh_ox_id
);
1027 ep
->esb_stat
|= ESB_ST_RESP
| ESB_ST_SEQ_INIT
;
1028 if ((ntoh24(fh
->fh_f_ctl
) & FC_FC_SEQ_INIT
) == 0)
1029 ep
->esb_stat
&= ~ESB_ST_SEQ_INIT
;
1031 fc_exch_hold(ep
); /* hold for caller */
1032 spin_unlock_bh(&ep
->ex_lock
); /* lock from fc_exch_alloc */
1038 * fc_seq_lookup_recip() - Find a sequence where the other end
1039 * originated the sequence
1040 * @lport: The local port that the frame was sent to
1041 * @mp: The Exchange Manager to lookup the exchange from
1042 * @fp: The frame associated with the sequence we're looking for
1044 * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
1045 * on the ep that should be released by the caller.
1047 static enum fc_pf_rjt_reason
fc_seq_lookup_recip(struct fc_lport
*lport
,
1048 struct fc_exch_mgr
*mp
,
1049 struct fc_frame
*fp
)
1051 struct fc_frame_header
*fh
= fc_frame_header_get(fp
);
1052 struct fc_exch
*ep
= NULL
;
1053 struct fc_seq
*sp
= NULL
;
1054 enum fc_pf_rjt_reason reject
= FC_RJT_NONE
;
1058 f_ctl
= ntoh24(fh
->fh_f_ctl
);
1059 WARN_ON((f_ctl
& FC_FC_SEQ_CTX
) != 0);
1062 * Lookup or create the exchange if we will be creating the sequence.
1064 if (f_ctl
& FC_FC_EX_CTX
) {
1065 xid
= ntohs(fh
->fh_ox_id
); /* we originated exch */
1066 ep
= fc_exch_find(mp
, xid
);
1068 atomic_inc(&mp
->stats
.xid_not_found
);
1069 reject
= FC_RJT_OX_ID
;
1072 if (ep
->rxid
== FC_XID_UNKNOWN
)
1073 ep
->rxid
= ntohs(fh
->fh_rx_id
);
1074 else if (ep
->rxid
!= ntohs(fh
->fh_rx_id
)) {
1075 reject
= FC_RJT_OX_ID
;
1079 xid
= ntohs(fh
->fh_rx_id
); /* we are the responder */
1082 * Special case for MDS issuing an ELS TEST with a
1084 * XXX take this out once we do the proper reject.
1086 if (xid
== 0 && fh
->fh_r_ctl
== FC_RCTL_ELS_REQ
&&
1087 fc_frame_payload_op(fp
) == ELS_TEST
) {
1088 fh
->fh_rx_id
= htons(FC_XID_UNKNOWN
);
1089 xid
= FC_XID_UNKNOWN
;
1093 * new sequence - find the exchange
1095 ep
= fc_exch_find(mp
, xid
);
1096 if ((f_ctl
& FC_FC_FIRST_SEQ
) && fc_sof_is_init(fr_sof(fp
))) {
1098 atomic_inc(&mp
->stats
.xid_busy
);
1099 reject
= FC_RJT_RX_ID
;
1102 ep
= fc_exch_resp(lport
, mp
, fp
);
1104 reject
= FC_RJT_EXCH_EST
; /* XXX */
1107 xid
= ep
->xid
; /* get our XID */
1109 atomic_inc(&mp
->stats
.xid_not_found
);
1110 reject
= FC_RJT_RX_ID
; /* XID not found */
1115 spin_lock_bh(&ep
->ex_lock
);
1117 * At this point, we have the exchange held.
1118 * Find or create the sequence.
1120 if (fc_sof_is_init(fr_sof(fp
))) {
1122 sp
->ssb_stat
|= SSB_ST_RESP
;
1123 sp
->id
= fh
->fh_seq_id
;
1126 if (sp
->id
!= fh
->fh_seq_id
) {
1127 atomic_inc(&mp
->stats
.seq_not_found
);
1128 if (f_ctl
& FC_FC_END_SEQ
) {
1130 * Update sequence_id based on incoming last
1131 * frame of sequence exchange. This is needed
1132 * for FC target where DDP has been used
1133 * on target where, stack is indicated only
1134 * about last frame's (payload _header) header.
1135 * Whereas "seq_id" which is part of
1136 * frame_header is allocated by initiator
1137 * which is totally different from "seq_id"
1138 * allocated when XFER_RDY was sent by target.
1139 * To avoid false -ve which results into not
1140 * sending RSP, hence write request on other
1141 * end never finishes.
1143 sp
->ssb_stat
|= SSB_ST_RESP
;
1144 sp
->id
= fh
->fh_seq_id
;
1146 spin_unlock_bh(&ep
->ex_lock
);
1148 /* sequence/exch should exist */
1149 reject
= FC_RJT_SEQ_ID
;
1154 WARN_ON(ep
!= fc_seq_exch(sp
));
1156 if (f_ctl
& FC_FC_SEQ_INIT
)
1157 ep
->esb_stat
|= ESB_ST_SEQ_INIT
;
1158 spin_unlock_bh(&ep
->ex_lock
);
1164 fc_exch_done(&ep
->seq
);
1165 fc_exch_release(ep
); /* hold from fc_exch_find/fc_exch_resp */
1170 * fc_seq_lookup_orig() - Find a sequence where this end
1171 * originated the sequence
1172 * @mp: The Exchange Manager to lookup the exchange from
1173 * @fp: The frame associated with the sequence we're looking for
1175 * Does not hold the sequence for the caller.
1177 static struct fc_seq
*fc_seq_lookup_orig(struct fc_exch_mgr
*mp
,
1178 struct fc_frame
*fp
)
1180 struct fc_frame_header
*fh
= fc_frame_header_get(fp
);
1182 struct fc_seq
*sp
= NULL
;
1186 f_ctl
= ntoh24(fh
->fh_f_ctl
);
1187 WARN_ON((f_ctl
& FC_FC_SEQ_CTX
) != FC_FC_SEQ_CTX
);
1188 xid
= ntohs((f_ctl
& FC_FC_EX_CTX
) ? fh
->fh_ox_id
: fh
->fh_rx_id
);
1189 ep
= fc_exch_find(mp
, xid
);
1192 if (ep
->seq
.id
== fh
->fh_seq_id
) {
1194 * Save the RX_ID if we didn't previously know it.
1197 if ((f_ctl
& FC_FC_EX_CTX
) != 0 &&
1198 ep
->rxid
== FC_XID_UNKNOWN
) {
1199 ep
->rxid
= ntohs(fh
->fh_rx_id
);
1202 fc_exch_release(ep
);
1207 * fc_exch_set_addr() - Set the source and destination IDs for an exchange
1208 * @ep: The exchange to set the addresses for
1209 * @orig_id: The originator's ID
1210 * @resp_id: The responder's ID
1212 * Note this must be done before the first sequence of the exchange is sent.
1214 static void fc_exch_set_addr(struct fc_exch
*ep
,
1215 u32 orig_id
, u32 resp_id
)
1218 if (ep
->esb_stat
& ESB_ST_RESP
) {
1228 * fc_seq_els_rsp_send() - Send an ELS response using information from
1229 * the existing sequence/exchange.
1230 * @fp: The received frame
1231 * @els_cmd: The ELS command to be sent
1232 * @els_data: The ELS data to be sent
1234 * The received frame is not freed.
1236 void fc_seq_els_rsp_send(struct fc_frame
*fp
, enum fc_els_cmd els_cmd
,
1237 struct fc_seq_els_data
*els_data
)
1241 fc_seq_ls_rjt(fp
, els_data
->reason
, els_data
->explan
);
1247 fc_exch_els_rrq(fp
);
1250 fc_exch_els_rec(fp
);
1253 FC_LPORT_DBG(fr_dev(fp
), "Invalid ELS CMD:%x\n", els_cmd
);
1256 EXPORT_SYMBOL_GPL(fc_seq_els_rsp_send
);
1259 * fc_seq_send_last() - Send a sequence that is the last in the exchange
1260 * @sp: The sequence that is to be sent
1261 * @fp: The frame that will be sent on the sequence
1262 * @rctl: The R_CTL information to be sent
1263 * @fh_type: The frame header type
1265 static void fc_seq_send_last(struct fc_seq
*sp
, struct fc_frame
*fp
,
1266 enum fc_rctl rctl
, enum fc_fh_type fh_type
)
1269 struct fc_exch
*ep
= fc_seq_exch(sp
);
1271 f_ctl
= FC_FC_LAST_SEQ
| FC_FC_END_SEQ
| FC_FC_SEQ_INIT
;
1273 fc_fill_fc_hdr(fp
, rctl
, ep
->did
, ep
->sid
, fh_type
, f_ctl
, 0);
1274 fc_seq_send_locked(ep
->lp
, sp
, fp
);
1278 * fc_seq_send_ack() - Send an acknowledgement that we've received a frame
1279 * @sp: The sequence to send the ACK on
1280 * @rx_fp: The received frame that is being acknoledged
1282 * Send ACK_1 (or equiv.) indicating we received something.
1284 static void fc_seq_send_ack(struct fc_seq
*sp
, const struct fc_frame
*rx_fp
)
1286 struct fc_frame
*fp
;
1287 struct fc_frame_header
*rx_fh
;
1288 struct fc_frame_header
*fh
;
1289 struct fc_exch
*ep
= fc_seq_exch(sp
);
1290 struct fc_lport
*lport
= ep
->lp
;
1294 * Don't send ACKs for class 3.
1296 if (fc_sof_needs_ack(fr_sof(rx_fp
))) {
1297 fp
= fc_frame_alloc(lport
, 0);
1299 FC_EXCH_DBG(ep
, "Drop ACK request, out of memory\n");
1303 fh
= fc_frame_header_get(fp
);
1304 fh
->fh_r_ctl
= FC_RCTL_ACK_1
;
1305 fh
->fh_type
= FC_TYPE_BLS
;
1308 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1309 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1310 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1311 * Last ACK uses bits 7-6 (continue sequence),
1312 * bits 5-4 are meaningful (what kind of ACK to use).
1314 rx_fh
= fc_frame_header_get(rx_fp
);
1315 f_ctl
= ntoh24(rx_fh
->fh_f_ctl
);
1316 f_ctl
&= FC_FC_EX_CTX
| FC_FC_SEQ_CTX
|
1317 FC_FC_FIRST_SEQ
| FC_FC_LAST_SEQ
|
1318 FC_FC_END_SEQ
| FC_FC_END_CONN
| FC_FC_SEQ_INIT
|
1319 FC_FC_RETX_SEQ
| FC_FC_UNI_TX
;
1320 f_ctl
^= FC_FC_EX_CTX
| FC_FC_SEQ_CTX
;
1321 hton24(fh
->fh_f_ctl
, f_ctl
);
1323 fc_exch_setup_hdr(ep
, fp
, f_ctl
);
1324 fh
->fh_seq_id
= rx_fh
->fh_seq_id
;
1325 fh
->fh_seq_cnt
= rx_fh
->fh_seq_cnt
;
1326 fh
->fh_parm_offset
= htonl(1); /* ack single frame */
1328 fr_sof(fp
) = fr_sof(rx_fp
);
1329 if (f_ctl
& FC_FC_END_SEQ
)
1330 fr_eof(fp
) = FC_EOF_T
;
1332 fr_eof(fp
) = FC_EOF_N
;
1334 lport
->tt
.frame_send(lport
, fp
);
1339 * fc_exch_send_ba_rjt() - Send BLS Reject
1340 * @rx_fp: The frame being rejected
1341 * @reason: The reason the frame is being rejected
1342 * @explan: The explanation for the rejection
1344 * This is for rejecting BA_ABTS only.
1346 static void fc_exch_send_ba_rjt(struct fc_frame
*rx_fp
,
1347 enum fc_ba_rjt_reason reason
,
1348 enum fc_ba_rjt_explan explan
)
1350 struct fc_frame
*fp
;
1351 struct fc_frame_header
*rx_fh
;
1352 struct fc_frame_header
*fh
;
1353 struct fc_ba_rjt
*rp
;
1355 struct fc_lport
*lport
;
1358 lport
= fr_dev(rx_fp
);
1360 fp
= fc_frame_alloc(lport
, sizeof(*rp
));
1362 FC_EXCH_DBG(fc_seq_exch(sp
),
1363 "Drop BA_RJT request, out of memory\n");
1366 fh
= fc_frame_header_get(fp
);
1367 rx_fh
= fc_frame_header_get(rx_fp
);
1369 memset(fh
, 0, sizeof(*fh
) + sizeof(*rp
));
1371 rp
= fc_frame_payload_get(fp
, sizeof(*rp
));
1372 rp
->br_reason
= reason
;
1373 rp
->br_explan
= explan
;
1376 * seq_id, cs_ctl, df_ctl and param/offset are zero.
1378 memcpy(fh
->fh_s_id
, rx_fh
->fh_d_id
, 3);
1379 memcpy(fh
->fh_d_id
, rx_fh
->fh_s_id
, 3);
1380 fh
->fh_ox_id
= rx_fh
->fh_ox_id
;
1381 fh
->fh_rx_id
= rx_fh
->fh_rx_id
;
1382 fh
->fh_seq_cnt
= rx_fh
->fh_seq_cnt
;
1383 fh
->fh_r_ctl
= FC_RCTL_BA_RJT
;
1384 fh
->fh_type
= FC_TYPE_BLS
;
1387 * Form f_ctl by inverting EX_CTX and SEQ_CTX (bits 23, 22).
1388 * Echo FIRST_SEQ, LAST_SEQ, END_SEQ, END_CONN, SEQ_INIT.
1389 * Bits 9-8 are meaningful (retransmitted or unidirectional).
1390 * Last ACK uses bits 7-6 (continue sequence),
1391 * bits 5-4 are meaningful (what kind of ACK to use).
1392 * Always set LAST_SEQ, END_SEQ.
1394 f_ctl
= ntoh24(rx_fh
->fh_f_ctl
);
1395 f_ctl
&= FC_FC_EX_CTX
| FC_FC_SEQ_CTX
|
1396 FC_FC_END_CONN
| FC_FC_SEQ_INIT
|
1397 FC_FC_RETX_SEQ
| FC_FC_UNI_TX
;
1398 f_ctl
^= FC_FC_EX_CTX
| FC_FC_SEQ_CTX
;
1399 f_ctl
|= FC_FC_LAST_SEQ
| FC_FC_END_SEQ
;
1400 f_ctl
&= ~FC_FC_FIRST_SEQ
;
1401 hton24(fh
->fh_f_ctl
, f_ctl
);
1403 fr_sof(fp
) = fc_sof_class(fr_sof(rx_fp
));
1404 fr_eof(fp
) = FC_EOF_T
;
1405 if (fc_sof_needs_ack(fr_sof(fp
)))
1406 fr_eof(fp
) = FC_EOF_N
;
1408 lport
->tt
.frame_send(lport
, fp
);
1412 * fc_exch_recv_abts() - Handle an incoming ABTS
1413 * @ep: The exchange the abort was on
1414 * @rx_fp: The ABTS frame
1416 * This would be for target mode usually, but could be due to lost
1417 * FCP transfer ready, confirm or RRQ. We always handle this as an
1418 * exchange abort, ignoring the parameter.
1420 static void fc_exch_recv_abts(struct fc_exch
*ep
, struct fc_frame
*rx_fp
)
1422 struct fc_frame
*fp
;
1423 struct fc_ba_acc
*ap
;
1424 struct fc_frame_header
*fh
;
1430 FC_EXCH_DBG(ep
, "exch: ABTS received\n");
1431 fp
= fc_frame_alloc(ep
->lp
, sizeof(*ap
));
1433 FC_EXCH_DBG(ep
, "Drop ABTS request, out of memory\n");
1437 spin_lock_bh(&ep
->ex_lock
);
1438 if (ep
->esb_stat
& ESB_ST_COMPLETE
) {
1439 spin_unlock_bh(&ep
->ex_lock
);
1440 FC_EXCH_DBG(ep
, "exch: ABTS rejected, exchange complete\n");
1444 if (!(ep
->esb_stat
& ESB_ST_REC_QUAL
)) {
1445 ep
->esb_stat
|= ESB_ST_REC_QUAL
;
1446 fc_exch_hold(ep
); /* hold for REC_QUAL */
1448 fc_exch_timer_set_locked(ep
, ep
->r_a_tov
);
1449 fh
= fc_frame_header_get(fp
);
1450 ap
= fc_frame_payload_get(fp
, sizeof(*ap
));
1451 memset(ap
, 0, sizeof(*ap
));
1453 ap
->ba_high_seq_cnt
= htons(0xffff);
1454 if (sp
->ssb_stat
& SSB_ST_RESP
) {
1455 ap
->ba_seq_id
= sp
->id
;
1456 ap
->ba_seq_id_val
= FC_BA_SEQ_ID_VAL
;
1457 ap
->ba_high_seq_cnt
= fh
->fh_seq_cnt
;
1458 ap
->ba_low_seq_cnt
= htons(sp
->cnt
);
1460 sp
= fc_seq_start_next_locked(sp
);
1461 fc_seq_send_last(sp
, fp
, FC_RCTL_BA_ACC
, FC_TYPE_BLS
);
1462 ep
->esb_stat
|= ESB_ST_ABNORMAL
;
1463 spin_unlock_bh(&ep
->ex_lock
);
1466 fc_frame_free(rx_fp
);
1470 fc_exch_send_ba_rjt(rx_fp
, FC_BA_RJT_UNABLE
, FC_BA_RJT_INV_XID
);
1475 * fc_seq_assign() - Assign exchange and sequence for incoming request
1476 * @lport: The local port that received the request
1477 * @fp: The request frame
1479 * On success, the sequence pointer will be returned and also in fr_seq(@fp).
1480 * A reference will be held on the exchange/sequence for the caller, which
1481 * must call fc_seq_release().
1483 struct fc_seq
*fc_seq_assign(struct fc_lport
*lport
, struct fc_frame
*fp
)
1485 struct fc_exch_mgr_anchor
*ema
;
1487 WARN_ON(lport
!= fr_dev(fp
));
1488 WARN_ON(fr_seq(fp
));
1491 list_for_each_entry(ema
, &lport
->ema_list
, ema_list
)
1492 if ((!ema
->match
|| ema
->match(fp
)) &&
1493 fc_seq_lookup_recip(lport
, ema
->mp
, fp
) == FC_RJT_NONE
)
1497 EXPORT_SYMBOL(fc_seq_assign
);
1500 * fc_seq_release() - Release the hold
1501 * @sp: The sequence.
1503 void fc_seq_release(struct fc_seq
*sp
)
1505 fc_exch_release(fc_seq_exch(sp
));
1507 EXPORT_SYMBOL(fc_seq_release
);
1510 * fc_exch_recv_req() - Handler for an incoming request
1511 * @lport: The local port that received the request
1512 * @mp: The EM that the exchange is on
1513 * @fp: The request frame
1515 * This is used when the other end is originating the exchange
1518 static void fc_exch_recv_req(struct fc_lport
*lport
, struct fc_exch_mgr
*mp
,
1519 struct fc_frame
*fp
)
1521 struct fc_frame_header
*fh
= fc_frame_header_get(fp
);
1522 struct fc_seq
*sp
= NULL
;
1523 struct fc_exch
*ep
= NULL
;
1524 enum fc_pf_rjt_reason reject
;
1526 /* We can have the wrong fc_lport at this point with NPIV, which is a
1527 * problem now that we know a new exchange needs to be allocated
1529 lport
= fc_vport_id_lookup(lport
, ntoh24(fh
->fh_d_id
));
1536 BUG_ON(fr_seq(fp
)); /* XXX remove later */
1539 * If the RX_ID is 0xffff, don't allocate an exchange.
1540 * The upper-level protocol may request one later, if needed.
1542 if (fh
->fh_rx_id
== htons(FC_XID_UNKNOWN
))
1543 return fc_lport_recv(lport
, fp
);
1545 reject
= fc_seq_lookup_recip(lport
, mp
, fp
);
1546 if (reject
== FC_RJT_NONE
) {
1547 sp
= fr_seq(fp
); /* sequence will be held */
1548 ep
= fc_seq_exch(sp
);
1549 fc_seq_send_ack(sp
, fp
);
1550 ep
->encaps
= fr_encaps(fp
);
1553 * Call the receive function.
1555 * The receive function may allocate a new sequence
1556 * over the old one, so we shouldn't change the
1557 * sequence after this.
1559 * The frame will be freed by the receive function.
1560 * If new exch resp handler is valid then call that
1563 if (!fc_invoke_resp(ep
, sp
, fp
))
1564 fc_lport_recv(lport
, fp
);
1565 fc_exch_release(ep
); /* release from lookup */
1567 FC_LPORT_DBG(lport
, "exch/seq lookup failed: reject %x\n",
1574 * fc_exch_recv_seq_resp() - Handler for an incoming response where the other
1575 * end is the originator of the sequence that is a
1576 * response to our initial exchange
1577 * @mp: The EM that the exchange is on
1578 * @fp: The response frame
1580 static void fc_exch_recv_seq_resp(struct fc_exch_mgr
*mp
, struct fc_frame
*fp
)
1582 struct fc_frame_header
*fh
= fc_frame_header_get(fp
);
1589 ep
= fc_exch_find(mp
, ntohs(fh
->fh_ox_id
));
1591 atomic_inc(&mp
->stats
.xid_not_found
);
1594 if (ep
->esb_stat
& ESB_ST_COMPLETE
) {
1595 atomic_inc(&mp
->stats
.xid_not_found
);
1598 if (ep
->rxid
== FC_XID_UNKNOWN
)
1599 ep
->rxid
= ntohs(fh
->fh_rx_id
);
1600 if (ep
->sid
!= 0 && ep
->sid
!= ntoh24(fh
->fh_d_id
)) {
1601 atomic_inc(&mp
->stats
.xid_not_found
);
1604 if (ep
->did
!= ntoh24(fh
->fh_s_id
) &&
1605 ep
->did
!= FC_FID_FLOGI
) {
1606 atomic_inc(&mp
->stats
.xid_not_found
);
1611 if (fc_sof_is_init(sof
)) {
1612 sp
->ssb_stat
|= SSB_ST_RESP
;
1613 sp
->id
= fh
->fh_seq_id
;
1616 f_ctl
= ntoh24(fh
->fh_f_ctl
);
1619 spin_lock_bh(&ep
->ex_lock
);
1620 if (f_ctl
& FC_FC_SEQ_INIT
)
1621 ep
->esb_stat
|= ESB_ST_SEQ_INIT
;
1622 spin_unlock_bh(&ep
->ex_lock
);
1624 if (fc_sof_needs_ack(sof
))
1625 fc_seq_send_ack(sp
, fp
);
1627 if (fh
->fh_type
!= FC_TYPE_FCP
&& fr_eof(fp
) == FC_EOF_T
&&
1628 (f_ctl
& (FC_FC_LAST_SEQ
| FC_FC_END_SEQ
)) ==
1629 (FC_FC_LAST_SEQ
| FC_FC_END_SEQ
)) {
1630 spin_lock_bh(&ep
->ex_lock
);
1631 rc
= fc_exch_done_locked(ep
);
1632 WARN_ON(fc_seq_exch(sp
) != ep
);
1633 spin_unlock_bh(&ep
->ex_lock
);
1639 * Call the receive function.
1640 * The sequence is held (has a refcnt) for us,
1641 * but not for the receive function.
1643 * The receive function may allocate a new sequence
1644 * over the old one, so we shouldn't change the
1645 * sequence after this.
1647 * The frame will be freed by the receive function.
1648 * If new exch resp handler is valid then call that
1651 if (!fc_invoke_resp(ep
, sp
, fp
))
1654 fc_exch_release(ep
);
1657 fc_exch_release(ep
);
1663 * fc_exch_recv_resp() - Handler for a sequence where other end is
1664 * responding to our sequence
1665 * @mp: The EM that the exchange is on
1666 * @fp: The response frame
1668 static void fc_exch_recv_resp(struct fc_exch_mgr
*mp
, struct fc_frame
*fp
)
1672 sp
= fc_seq_lookup_orig(mp
, fp
); /* doesn't hold sequence */
1675 atomic_inc(&mp
->stats
.xid_not_found
);
1677 atomic_inc(&mp
->stats
.non_bls_resp
);
1683 * fc_exch_abts_resp() - Handler for a response to an ABT
1684 * @ep: The exchange that the frame is on
1685 * @fp: The response frame
1687 * This response would be to an ABTS cancelling an exchange or sequence.
1688 * The response can be either BA_ACC or BA_RJT
1690 static void fc_exch_abts_resp(struct fc_exch
*ep
, struct fc_frame
*fp
)
1692 struct fc_frame_header
*fh
;
1693 struct fc_ba_acc
*ap
;
1697 int rc
= 1, has_rec
= 0;
1699 fh
= fc_frame_header_get(fp
);
1700 FC_EXCH_DBG(ep
, "exch: BLS rctl %x - %s\n", fh
->fh_r_ctl
,
1701 fc_exch_rctl_name(fh
->fh_r_ctl
));
1703 if (cancel_delayed_work_sync(&ep
->timeout_work
)) {
1704 FC_EXCH_DBG(ep
, "Exchange timer canceled due to ABTS response\n");
1705 fc_exch_release(ep
); /* release from pending timer hold */
1708 spin_lock_bh(&ep
->ex_lock
);
1709 switch (fh
->fh_r_ctl
) {
1710 case FC_RCTL_BA_ACC
:
1711 ap
= fc_frame_payload_get(fp
, sizeof(*ap
));
1716 * Decide whether to establish a Recovery Qualifier.
1717 * We do this if there is a non-empty SEQ_CNT range and
1718 * SEQ_ID is the same as the one we aborted.
1720 low
= ntohs(ap
->ba_low_seq_cnt
);
1721 high
= ntohs(ap
->ba_high_seq_cnt
);
1722 if ((ep
->esb_stat
& ESB_ST_REC_QUAL
) == 0 &&
1723 (ap
->ba_seq_id_val
!= FC_BA_SEQ_ID_VAL
||
1724 ap
->ba_seq_id
== ep
->seq_id
) && low
!= high
) {
1725 ep
->esb_stat
|= ESB_ST_REC_QUAL
;
1726 fc_exch_hold(ep
); /* hold for recovery qualifier */
1730 case FC_RCTL_BA_RJT
:
1736 /* do we need to do some other checks here. Can we reuse more of
1737 * fc_exch_recv_seq_resp
1741 * do we want to check END_SEQ as well as LAST_SEQ here?
1743 if (ep
->fh_type
!= FC_TYPE_FCP
&&
1744 ntoh24(fh
->fh_f_ctl
) & FC_FC_LAST_SEQ
)
1745 rc
= fc_exch_done_locked(ep
);
1746 spin_unlock_bh(&ep
->ex_lock
);
1751 if (!fc_invoke_resp(ep
, sp
, fp
))
1754 fc_exch_timer_set(ep
, ep
->r_a_tov
);
1755 fc_exch_release(ep
);
1759 * fc_exch_recv_bls() - Handler for a BLS sequence
1760 * @mp: The EM that the exchange is on
1761 * @fp: The request frame
1763 * The BLS frame is always a sequence initiated by the remote side.
1764 * We may be either the originator or recipient of the exchange.
1766 static void fc_exch_recv_bls(struct fc_exch_mgr
*mp
, struct fc_frame
*fp
)
1768 struct fc_frame_header
*fh
;
1772 fh
= fc_frame_header_get(fp
);
1773 f_ctl
= ntoh24(fh
->fh_f_ctl
);
1776 ep
= fc_exch_find(mp
, (f_ctl
& FC_FC_EX_CTX
) ?
1777 ntohs(fh
->fh_ox_id
) : ntohs(fh
->fh_rx_id
));
1778 if (ep
&& (f_ctl
& FC_FC_SEQ_INIT
)) {
1779 spin_lock_bh(&ep
->ex_lock
);
1780 ep
->esb_stat
|= ESB_ST_SEQ_INIT
;
1781 spin_unlock_bh(&ep
->ex_lock
);
1783 if (f_ctl
& FC_FC_SEQ_CTX
) {
1785 * A response to a sequence we initiated.
1786 * This should only be ACKs for class 2 or F.
1788 switch (fh
->fh_r_ctl
) {
1794 FC_EXCH_DBG(ep
, "BLS rctl %x - %s received\n",
1796 fc_exch_rctl_name(fh
->fh_r_ctl
));
1801 switch (fh
->fh_r_ctl
) {
1802 case FC_RCTL_BA_RJT
:
1803 case FC_RCTL_BA_ACC
:
1805 fc_exch_abts_resp(ep
, fp
);
1809 case FC_RCTL_BA_ABTS
:
1811 fc_exch_recv_abts(ep
, fp
);
1815 default: /* ignore junk */
1821 fc_exch_release(ep
); /* release hold taken by fc_exch_find */
1825 * fc_seq_ls_acc() - Accept sequence with LS_ACC
1826 * @rx_fp: The received frame, not freed here.
1828 * If this fails due to allocation or transmit congestion, assume the
1829 * originator will repeat the sequence.
1831 static void fc_seq_ls_acc(struct fc_frame
*rx_fp
)
1833 struct fc_lport
*lport
;
1834 struct fc_els_ls_acc
*acc
;
1835 struct fc_frame
*fp
;
1838 lport
= fr_dev(rx_fp
);
1840 fp
= fc_frame_alloc(lport
, sizeof(*acc
));
1842 FC_EXCH_DBG(fc_seq_exch(sp
),
1843 "exch: drop LS_ACC, out of memory\n");
1846 acc
= fc_frame_payload_get(fp
, sizeof(*acc
));
1847 memset(acc
, 0, sizeof(*acc
));
1848 acc
->la_cmd
= ELS_LS_ACC
;
1849 fc_fill_reply_hdr(fp
, rx_fp
, FC_RCTL_ELS_REP
, 0);
1850 lport
->tt
.frame_send(lport
, fp
);
1854 * fc_seq_ls_rjt() - Reject a sequence with ELS LS_RJT
1855 * @rx_fp: The received frame, not freed here.
1856 * @reason: The reason the sequence is being rejected
1857 * @explan: The explanation for the rejection
1859 * If this fails due to allocation or transmit congestion, assume the
1860 * originator will repeat the sequence.
1862 static void fc_seq_ls_rjt(struct fc_frame
*rx_fp
, enum fc_els_rjt_reason reason
,
1863 enum fc_els_rjt_explan explan
)
1865 struct fc_lport
*lport
;
1866 struct fc_els_ls_rjt
*rjt
;
1867 struct fc_frame
*fp
;
1870 lport
= fr_dev(rx_fp
);
1872 fp
= fc_frame_alloc(lport
, sizeof(*rjt
));
1874 FC_EXCH_DBG(fc_seq_exch(sp
),
1875 "exch: drop LS_ACC, out of memory\n");
1878 rjt
= fc_frame_payload_get(fp
, sizeof(*rjt
));
1879 memset(rjt
, 0, sizeof(*rjt
));
1880 rjt
->er_cmd
= ELS_LS_RJT
;
1881 rjt
->er_reason
= reason
;
1882 rjt
->er_explan
= explan
;
1883 fc_fill_reply_hdr(fp
, rx_fp
, FC_RCTL_ELS_REP
, 0);
1884 lport
->tt
.frame_send(lport
, fp
);
1888 * fc_exch_reset() - Reset an exchange
1889 * @ep: The exchange to be reset
1891 * Note: May sleep if invoked from outside a response handler.
1893 static void fc_exch_reset(struct fc_exch
*ep
)
1898 spin_lock_bh(&ep
->ex_lock
);
1899 ep
->state
|= FC_EX_RST_CLEANUP
;
1900 fc_exch_timer_cancel(ep
);
1901 if (ep
->esb_stat
& ESB_ST_REC_QUAL
)
1902 atomic_dec(&ep
->ex_refcnt
); /* drop hold for rec_qual */
1903 ep
->esb_stat
&= ~ESB_ST_REC_QUAL
;
1905 rc
= fc_exch_done_locked(ep
);
1906 spin_unlock_bh(&ep
->ex_lock
);
1913 fc_invoke_resp(ep
, sp
, ERR_PTR(-FC_EX_CLOSED
));
1914 fc_seq_set_resp(sp
, NULL
, ep
->arg
);
1915 fc_exch_release(ep
);
1919 * fc_exch_pool_reset() - Reset a per cpu exchange pool
1920 * @lport: The local port that the exchange pool is on
1921 * @pool: The exchange pool to be reset
1922 * @sid: The source ID
1923 * @did: The destination ID
1925 * Resets a per cpu exches pool, releasing all of its sequences
1926 * and exchanges. If sid is non-zero then reset only exchanges
1927 * we sourced from the local port's FID. If did is non-zero then
1928 * only reset exchanges destined for the local port's FID.
1930 static void fc_exch_pool_reset(struct fc_lport
*lport
,
1931 struct fc_exch_pool
*pool
,
1935 struct fc_exch
*next
;
1937 spin_lock_bh(&pool
->lock
);
1939 list_for_each_entry_safe(ep
, next
, &pool
->ex_list
, ex_list
) {
1940 if ((lport
== ep
->lp
) &&
1941 (sid
== 0 || sid
== ep
->sid
) &&
1942 (did
== 0 || did
== ep
->did
)) {
1944 spin_unlock_bh(&pool
->lock
);
1948 fc_exch_release(ep
);
1949 spin_lock_bh(&pool
->lock
);
1952 * must restart loop incase while lock
1953 * was down multiple eps were released.
1958 pool
->next_index
= 0;
1959 pool
->left
= FC_XID_UNKNOWN
;
1960 pool
->right
= FC_XID_UNKNOWN
;
1961 spin_unlock_bh(&pool
->lock
);
1965 * fc_exch_mgr_reset() - Reset all EMs of a local port
1966 * @lport: The local port whose EMs are to be reset
1967 * @sid: The source ID
1968 * @did: The destination ID
1970 * Reset all EMs associated with a given local port. Release all
1971 * sequences and exchanges. If sid is non-zero then reset only the
1972 * exchanges sent from the local port's FID. If did is non-zero then
1973 * reset only exchanges destined for the local port's FID.
1975 void fc_exch_mgr_reset(struct fc_lport
*lport
, u32 sid
, u32 did
)
1977 struct fc_exch_mgr_anchor
*ema
;
1980 list_for_each_entry(ema
, &lport
->ema_list
, ema_list
) {
1981 for_each_possible_cpu(cpu
)
1982 fc_exch_pool_reset(lport
,
1983 per_cpu_ptr(ema
->mp
->pool
, cpu
),
1987 EXPORT_SYMBOL(fc_exch_mgr_reset
);
1990 * fc_exch_lookup() - find an exchange
1991 * @lport: The local port
1992 * @xid: The exchange ID
1994 * Returns exchange pointer with hold for caller, or NULL if not found.
1996 static struct fc_exch
*fc_exch_lookup(struct fc_lport
*lport
, u32 xid
)
1998 struct fc_exch_mgr_anchor
*ema
;
2000 list_for_each_entry(ema
, &lport
->ema_list
, ema_list
)
2001 if (ema
->mp
->min_xid
<= xid
&& xid
<= ema
->mp
->max_xid
)
2002 return fc_exch_find(ema
->mp
, xid
);
2007 * fc_exch_els_rec() - Handler for ELS REC (Read Exchange Concise) requests
2008 * @rfp: The REC frame, not freed here.
2010 * Note that the requesting port may be different than the S_ID in the request.
2012 static void fc_exch_els_rec(struct fc_frame
*rfp
)
2014 struct fc_lport
*lport
;
2015 struct fc_frame
*fp
;
2017 struct fc_els_rec
*rp
;
2018 struct fc_els_rec_acc
*acc
;
2019 enum fc_els_rjt_reason reason
= ELS_RJT_LOGIC
;
2020 enum fc_els_rjt_explan explan
;
2022 u16 xid
, rxid
, oxid
;
2024 lport
= fr_dev(rfp
);
2025 rp
= fc_frame_payload_get(rfp
, sizeof(*rp
));
2026 explan
= ELS_EXPL_INV_LEN
;
2029 sid
= ntoh24(rp
->rec_s_id
);
2030 rxid
= ntohs(rp
->rec_rx_id
);
2031 oxid
= ntohs(rp
->rec_ox_id
);
2033 explan
= ELS_EXPL_OXID_RXID
;
2034 if (sid
== fc_host_port_id(lport
->host
))
2038 if (xid
== FC_XID_UNKNOWN
) {
2040 "REC request from %x: invalid rxid %x oxid %x\n",
2044 ep
= fc_exch_lookup(lport
, xid
);
2047 "REC request from %x: rxid %x oxid %x not found\n",
2051 FC_EXCH_DBG(ep
, "REC request from %x: rxid %x oxid %x\n",
2053 if (ep
->oid
!= sid
|| oxid
!= ep
->oxid
)
2055 if (rxid
!= FC_XID_UNKNOWN
&& rxid
!= ep
->rxid
)
2057 fp
= fc_frame_alloc(lport
, sizeof(*acc
));
2059 FC_EXCH_DBG(ep
, "Drop REC request, out of memory\n");
2063 acc
= fc_frame_payload_get(fp
, sizeof(*acc
));
2064 memset(acc
, 0, sizeof(*acc
));
2065 acc
->reca_cmd
= ELS_LS_ACC
;
2066 acc
->reca_ox_id
= rp
->rec_ox_id
;
2067 memcpy(acc
->reca_ofid
, rp
->rec_s_id
, 3);
2068 acc
->reca_rx_id
= htons(ep
->rxid
);
2069 if (ep
->sid
== ep
->oid
)
2070 hton24(acc
->reca_rfid
, ep
->did
);
2072 hton24(acc
->reca_rfid
, ep
->sid
);
2073 acc
->reca_fc4value
= htonl(ep
->seq
.rec_data
);
2074 acc
->reca_e_stat
= htonl(ep
->esb_stat
& (ESB_ST_RESP
|
2077 fc_fill_reply_hdr(fp
, rfp
, FC_RCTL_ELS_REP
, 0);
2078 lport
->tt
.frame_send(lport
, fp
);
2080 fc_exch_release(ep
);
2084 fc_exch_release(ep
);
2086 fc_seq_ls_rjt(rfp
, reason
, explan
);
2090 * fc_exch_rrq_resp() - Handler for RRQ responses
2091 * @sp: The sequence that the RRQ is on
2092 * @fp: The RRQ frame
2093 * @arg: The exchange that the RRQ is on
2095 * TODO: fix error handler.
2097 static void fc_exch_rrq_resp(struct fc_seq
*sp
, struct fc_frame
*fp
, void *arg
)
2099 struct fc_exch
*aborted_ep
= arg
;
2103 int err
= PTR_ERR(fp
);
2105 if (err
== -FC_EX_CLOSED
|| err
== -FC_EX_TIMEOUT
)
2107 FC_EXCH_DBG(aborted_ep
, "Cannot process RRQ, "
2108 "frame error %d\n", err
);
2112 op
= fc_frame_payload_op(fp
);
2117 FC_EXCH_DBG(aborted_ep
, "LS_RJT for RRQ\n");
2122 FC_EXCH_DBG(aborted_ep
, "unexpected response op %x for RRQ\n",
2128 fc_exch_done(&aborted_ep
->seq
);
2129 /* drop hold for rec qual */
2130 fc_exch_release(aborted_ep
);
2135 * fc_exch_seq_send() - Send a frame using a new exchange and sequence
2136 * @lport: The local port to send the frame on
2137 * @fp: The frame to be sent
2138 * @resp: The response handler for this request
2139 * @destructor: The destructor for the exchange
2140 * @arg: The argument to be passed to the response handler
2141 * @timer_msec: The timeout period for the exchange
2143 * The exchange response handler is set in this routine to resp()
2144 * function pointer. It can be called in two scenarios: if a timeout
2145 * occurs or if a response frame is received for the exchange. The
2146 * fc_frame pointer in response handler will also indicate timeout
2147 * as error using IS_ERR related macros.
2149 * The exchange destructor handler is also set in this routine.
2150 * The destructor handler is invoked by EM layer when exchange
2151 * is about to free, this can be used by caller to free its
2152 * resources along with exchange free.
2154 * The arg is passed back to resp and destructor handler.
2156 * The timeout value (in msec) for an exchange is set if non zero
2157 * timer_msec argument is specified. The timer is canceled when
2158 * it fires or when the exchange is done. The exchange timeout handler
2159 * is registered by EM layer.
2161 * The frame pointer with some of the header's fields must be
2162 * filled before calling this routine, those fields are:
2169 * - parameter or relative offset
2171 struct fc_seq
*fc_exch_seq_send(struct fc_lport
*lport
,
2172 struct fc_frame
*fp
,
2173 void (*resp
)(struct fc_seq
*,
2174 struct fc_frame
*fp
,
2176 void (*destructor
)(struct fc_seq
*, void *),
2177 void *arg
, u32 timer_msec
)
2180 struct fc_seq
*sp
= NULL
;
2181 struct fc_frame_header
*fh
;
2182 struct fc_fcp_pkt
*fsp
= NULL
;
2185 ep
= fc_exch_alloc(lport
, fp
);
2190 ep
->esb_stat
|= ESB_ST_SEQ_INIT
;
2191 fh
= fc_frame_header_get(fp
);
2192 fc_exch_set_addr(ep
, ntoh24(fh
->fh_s_id
), ntoh24(fh
->fh_d_id
));
2194 ep
->destructor
= destructor
;
2196 ep
->r_a_tov
= lport
->r_a_tov
;
2200 ep
->fh_type
= fh
->fh_type
; /* save for possbile timeout handling */
2201 ep
->f_ctl
= ntoh24(fh
->fh_f_ctl
);
2202 fc_exch_setup_hdr(ep
, fp
, ep
->f_ctl
);
2205 if (ep
->xid
<= lport
->lro_xid
&& fh
->fh_r_ctl
== FC_RCTL_DD_UNSOL_CMD
) {
2207 fc_fcp_ddp_setup(fr_fsp(fp
), ep
->xid
);
2210 if (unlikely(lport
->tt
.frame_send(lport
, fp
)))
2214 fc_exch_timer_set_locked(ep
, timer_msec
);
2215 ep
->f_ctl
&= ~FC_FC_FIRST_SEQ
; /* not first seq */
2217 if (ep
->f_ctl
& FC_FC_SEQ_INIT
)
2218 ep
->esb_stat
&= ~ESB_ST_SEQ_INIT
;
2219 spin_unlock_bh(&ep
->ex_lock
);
2223 fc_fcp_ddp_done(fsp
);
2224 rc
= fc_exch_done_locked(ep
);
2225 spin_unlock_bh(&ep
->ex_lock
);
2230 EXPORT_SYMBOL(fc_exch_seq_send
);
2233 * fc_exch_rrq() - Send an ELS RRQ (Reinstate Recovery Qualifier) command
2234 * @ep: The exchange to send the RRQ on
2236 * This tells the remote port to stop blocking the use of
2237 * the exchange and the seq_cnt range.
static void fc_exch_rrq(struct fc_exch *ep)
{
	struct fc_lport *lport;
	struct fc_els_rrq *rrq;
	struct fc_frame *fp;
	u32 did;

	lport = ep->lp;

	fp = fc_frame_alloc(lport, sizeof(*rrq));
	if (!fp)
		goto retry;

	rrq = fc_frame_payload_get(fp, sizeof(*rrq));
	memset(rrq, 0, sizeof(*rrq));
	rrq->rrq_cmd = ELS_RRQ;
	hton24(rrq->rrq_s_id, ep->sid);
	rrq->rrq_ox_id = htons(ep->oxid);
	rrq->rrq_rx_id = htons(ep->rxid);

	did = ep->did;
	if (ep->esb_stat & ESB_ST_RESP)
		did = ep->sid;

	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, did,
		       lport->port_id, FC_TYPE_ELS,
		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);

	if (fc_exch_seq_send(lport, fp, fc_exch_rrq_resp, NULL, ep,
			     lport->e_d_tov))
		return;

retry:
	FC_EXCH_DBG(ep, "exch: RRQ send failed\n");
	spin_lock_bh(&ep->ex_lock);
	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE)) {
		spin_unlock_bh(&ep->ex_lock);
		/* drop hold for rec qual */
		fc_exch_release(ep);
		return;
	}
	ep->esb_stat |= ESB_ST_REC_QUAL;
	fc_exch_timer_set_locked(ep, ep->r_a_tov);
	spin_unlock_bh(&ep->ex_lock);
}

/**
 * fc_exch_els_rrq() - Handler for ELS RRQ (Reinstate Recovery Qualifier) requests
 * @fp: The RRQ frame, not freed here.
 */
static void fc_exch_els_rrq(struct fc_frame *fp)
{
	struct fc_lport *lport;
	struct fc_exch *ep = NULL;	/* request or subject exchange */
	struct fc_els_rrq *rp;
	u32 sid;
	u16 xid;
	enum fc_els_rjt_explan explan;

	lport = fr_dev(fp);
	rp = fc_frame_payload_get(fp, sizeof(*rp));
	explan = ELS_EXPL_INV_LEN;
	if (!rp)
		goto reject;

	/*
	 * lookup subject exchange.
	 */
	sid = ntoh24(rp->rrq_s_id);		/* subject source */
	xid = fc_host_port_id(lport->host) == sid ?
			ntohs(rp->rrq_ox_id) : ntohs(rp->rrq_rx_id);
	ep = fc_exch_lookup(lport, xid);
	explan = ELS_EXPL_OXID_RXID;
	if (!ep)
		goto reject;
	spin_lock_bh(&ep->ex_lock);
	FC_EXCH_DBG(ep, "RRQ request from %x: xid %x rxid %x oxid %x\n",
		    sid, xid, ntohs(rp->rrq_rx_id), ntohs(rp->rrq_ox_id));
	if (ep->oxid != ntohs(rp->rrq_ox_id))
		goto unlock_reject;
	if (ep->rxid != ntohs(rp->rrq_rx_id) &&
	    ep->rxid != FC_XID_UNKNOWN)
		goto unlock_reject;
	explan = ELS_EXPL_SID;
	if (ep->sid != sid)
		goto unlock_reject;

	/*
	 * Clear Recovery Qualifier state, and cancel timer if complete.
	 */
	if (ep->esb_stat & ESB_ST_REC_QUAL) {
		ep->esb_stat &= ~ESB_ST_REC_QUAL;
		atomic_dec(&ep->ex_refcnt);	/* drop hold for rec qual */
	}
	if (ep->esb_stat & ESB_ST_COMPLETE)
		fc_exch_timer_cancel(ep);

	spin_unlock_bh(&ep->ex_lock);

	/*
	 * Send LS_ACC.
	 */
	fc_seq_ls_acc(fp);
	goto out;

unlock_reject:
	spin_unlock_bh(&ep->ex_lock);
reject:
	fc_seq_ls_rjt(fp, ELS_RJT_LOGIC, explan);
out:
	if (ep)
		fc_exch_release(ep);	/* drop hold from fc_exch_find */
}

/**
 * fc_exch_update_stats() - Update exchange manager statistics for a local port
 * @lport: The local port whose exchange manager stats are to be updated
 */
void fc_exch_update_stats(struct fc_lport *lport)
{
	struct fc_host_statistics *st;
	struct fc_exch_mgr_anchor *ema;
	struct fc_exch_mgr *mp;

	st = &lport->host_stats;

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		mp = ema->mp;
		st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
		st->fc_no_free_exch_xid +=
				atomic_read(&mp->stats.no_free_exch_xid);
		st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
		st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
		st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
		st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
	}
}
EXPORT_SYMBOL(fc_exch_update_stats);
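
/*
 * Illustrative sketch (not part of the original source): a transport's
 * fc_host statistics handler could fold the per-EM counters into
 * lport->host_stats by calling fc_exch_update_stats() before returning
 * them. The function name "example_get_host_stats" and the use of
 * shost_priv() to recover the lport are assumptions made for this sketch.
 */
#if 0	/* example only, not compiled */
static struct fc_host_statistics *example_get_host_stats(struct Scsi_Host *shost)
{
	struct fc_lport *lport = shost_priv(shost);

	fc_exch_update_stats(lport);
	return &lport->host_stats;
}
#endif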

/**
 * fc_exch_mgr_add() - Add an exchange manager to a local port's list of EMs
 * @lport: The local port to add the exchange manager to
 * @mp: The exchange manager to be added to the local port
 * @match: The match routine that indicates when this EM should be used
 */
struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
					   struct fc_exch_mgr *mp,
					   bool (*match)(struct fc_frame *))
{
	struct fc_exch_mgr_anchor *ema;

	ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
	if (!ema)
		return ema;

	ema->mp = mp;
	ema->match = match;
	/* add EM anchor to EM anchors list */
	list_add_tail(&ema->ema_list, &lport->ema_list);
	kref_get(&mp->kref);
	return ema;
}
EXPORT_SYMBOL(fc_exch_mgr_add);

/**
 * fc_exch_mgr_destroy() - Destroy an exchange manager
 * @kref: The reference to the EM to be destroyed
 */
static void fc_exch_mgr_destroy(struct kref *kref)
{
	struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);

	mempool_destroy(mp->ep_pool);
	free_percpu(mp->pool);
	kfree(mp);
}

/**
 * fc_exch_mgr_del() - Delete an EM from a local port's list
 * @ema: The exchange manager anchor identifying the EM to be deleted
 */
void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
{
	/* remove EM anchor from EM anchors list */
	list_del(&ema->ema_list);
	kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
	kfree(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_del);

/**
 * fc_exch_mgr_list_clone() - Share all exchange manager objects
 * @src: Source lport to clone exchange managers from
 * @dst: New lport that takes references to all the exchange managers
 */
int fc_exch_mgr_list_clone(struct fc_lport *src, struct fc_lport *dst)
{
	struct fc_exch_mgr_anchor *ema, *tmp;

	list_for_each_entry(ema, &src->ema_list, ema_list) {
		if (!fc_exch_mgr_add(dst, ema->mp, ema->match))
			goto err;
	}
	return 0;
err:
	list_for_each_entry_safe(ema, tmp, &dst->ema_list, ema_list)
		fc_exch_mgr_del(ema);
	return -ENOMEM;
}
EXPORT_SYMBOL(fc_exch_mgr_list_clone);
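
/*
 * Illustrative sketch (not part of the original source): an NPIV-style
 * child port can share the physical port's exchange managers instead of
 * allocating its own; each shared EM gains a reference. The function name
 * "example_vport_setup" is an assumption made for this sketch.
 */
#if 0	/* example only, not compiled */
static int example_vport_setup(struct fc_lport *phys_lport,
			       struct fc_lport *vn_lport)
{
	return fc_exch_mgr_list_clone(phys_lport, vn_lport);
}
#endif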

/**
 * fc_exch_mgr_alloc() - Allocate an exchange manager
 * @lport: The local port that the new EM will be associated with
 * @class: The default FC class for new exchanges
 * @min_xid: The minimum XID for exchanges from the new EM
 * @max_xid: The maximum XID for exchanges from the new EM
 * @match: The match routine for the new EM
 */
struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lport,
				      enum fc_class class,
				      u16 min_xid, u16 max_xid,
				      bool (*match)(struct fc_frame *))
{
	struct fc_exch_mgr *mp;
	u16 pool_exch_range;
	size_t pool_size;
	unsigned int cpu;
	struct fc_exch_pool *pool;

	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
	    (min_xid & fc_cpu_mask) != 0) {
		FC_LPORT_DBG(lport, "Invalid min_xid 0x%x and max_xid 0x%x\n",
			     min_xid, max_xid);
		return NULL;
	}

	/*
	 * allocate memory for EM
	 */
	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
	if (!mp)
		return NULL;

	mp->class = class;
	mp->lport = lport;
	/* adjust em exch xid range for offload */
	mp->min_xid = min_xid;

	/* reduce range so per cpu pool fits into PCPU_MIN_UNIT_SIZE pool */
	pool_exch_range = (PCPU_MIN_UNIT_SIZE - sizeof(*pool)) /
		sizeof(struct fc_exch *);
	if ((max_xid - min_xid + 1) / (fc_cpu_mask + 1) > pool_exch_range) {
		mp->max_xid = pool_exch_range * (fc_cpu_mask + 1) +
			min_xid - 1;
	} else {
		mp->max_xid = max_xid;
		pool_exch_range = (mp->max_xid - mp->min_xid + 1) /
			(fc_cpu_mask + 1);
	}

	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
	if (!mp->ep_pool)
		goto free_mp;

	/*
	 * Setup per cpu exch pool with entire exchange id range equally
	 * divided across all cpus. The exch pointers array memory is
	 * allocated for exch range per pool.
	 */
	mp->pool_max_index = pool_exch_range - 1;

	/*
	 * Allocate and initialize per cpu exch pool
	 */
	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
	if (!mp->pool)
		goto free_mempool;
	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(mp->pool, cpu);
		pool->next_index = 0;
		pool->left = FC_XID_UNKNOWN;
		pool->right = FC_XID_UNKNOWN;
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->ex_list);
	}

	kref_init(&mp->kref);
	if (!fc_exch_mgr_add(lport, mp, match)) {
		free_percpu(mp->pool);
		goto free_mempool;
	}

	/*
	 * Above kref_init() sets mp->kref to 1 and then
	 * call to fc_exch_mgr_add incremented mp->kref again,
	 * so adjust that extra increment.
	 */
	kref_put(&mp->kref, fc_exch_mgr_destroy);
	return mp;

free_mempool:
	mempool_destroy(mp->ep_pool);
free_mp:
	kfree(mp);
	return NULL;
}
EXPORT_SYMBOL(fc_exch_mgr_alloc);
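
/*
 * Illustrative sketch (not part of the original source): a driver might
 * reserve a low XID range for offloaded exchanges selected by a match
 * routine and add a default EM for everything else. The XID boundaries and
 * the names "example_ddp_match"/"example_setup_ems" are assumptions made
 * for this sketch; note that min_xid must be aligned to fc_cpu_mask, and
 * error unwinding via fc_exch_mgr_free() is omitted.
 */
#if 0	/* example only, not compiled */
static bool example_ddp_match(struct fc_frame *fp)
{
	return fr_fsp(fp) != NULL;	/* e.g. only FCP traffic */
}

static int example_setup_ems(struct fc_lport *lport)
{
	/* EM for offload-capable XIDs, consulted first */
	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, 0x0000, 0x0fff,
			       example_ddp_match))
		return -ENOMEM;
	/* default EM, matched last for all remaining exchanges */
	if (!fc_exch_mgr_alloc(lport, FC_CLASS_3, 0x1000, 0xfffe, NULL))
		return -ENOMEM;
	return 0;
}
#endif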

/**
 * fc_exch_mgr_free() - Free all exchange managers on a local port
 * @lport: The local port whose EMs are to be freed
 */
void fc_exch_mgr_free(struct fc_lport *lport)
{
	struct fc_exch_mgr_anchor *ema, *next;

	flush_workqueue(fc_exch_workqueue);
	list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
		fc_exch_mgr_del(ema);
}
EXPORT_SYMBOL(fc_exch_mgr_free);

/**
 * fc_find_ema() - Lookup and return the appropriate Exchange Manager Anchor,
 *		   based on the frame's F_CTL bits and exchange ID
 * @f_ctl: The F_CTL field of the received frame
 * @lport: The local port the frame was received on
 * @fh: The received frame header
 */
static struct fc_exch_mgr_anchor *fc_find_ema(u32 f_ctl,
					      struct fc_lport *lport,
					      struct fc_frame_header *fh)
{
	struct fc_exch_mgr_anchor *ema;
	u16 xid;

	if (f_ctl & FC_FC_EX_CTX)
		xid = ntohs(fh->fh_ox_id);
	else {
		xid = ntohs(fh->fh_rx_id);
		if (xid == FC_XID_UNKNOWN)
			return list_entry(lport->ema_list.prev,
					  typeof(*ema), ema_list);
	}

	list_for_each_entry(ema, &lport->ema_list, ema_list) {
		if ((xid >= ema->mp->min_xid) &&
		    (xid <= ema->mp->max_xid))
			return ema;
	}
	return NULL;
}

/**
 * fc_exch_recv() - Handler for received frames
 * @lport: The local port the frame was received on
 * @fp: The received frame
 */
void fc_exch_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	struct fc_frame_header *fh = fc_frame_header_get(fp);
	struct fc_exch_mgr_anchor *ema;
	u32 f_ctl;

	if (!lport || lport->state == LPORT_ST_DISABLED) {
		FC_LPORT_DBG(lport, "Receiving frames for an lport that "
			     "has not been initialized correctly\n");
		fc_frame_free(fp);
		return;
	}

	f_ctl = ntoh24(fh->fh_f_ctl);
	ema = fc_find_ema(f_ctl, lport, fh);
	if (!ema) {
		FC_LPORT_DBG(lport, "Unable to find Exchange Manager Anchor, "
			     "fc_ctl <0x%x>, xid <0x%x>\n",
			     f_ctl,
			     (f_ctl & FC_FC_EX_CTX) ?
			     ntohs(fh->fh_ox_id) :
			     ntohs(fh->fh_rx_id));
		fc_frame_free(fp);
		return;
	}

	/*
	 * If frame is marked invalid, just drop it.
	 */
	switch (fr_eof(fp)) {
	case FC_EOF_T:
		if (f_ctl & FC_FC_END_SEQ)
			skb_trim(fp_skb(fp), fr_len(fp) - FC_FC_FILL(f_ctl));
		/* fall through */
	case FC_EOF_N:
		if (fh->fh_type == FC_TYPE_BLS)
			fc_exch_recv_bls(ema->mp, fp);
		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
			 FC_FC_EX_CTX)
			fc_exch_recv_seq_resp(ema->mp, fp);
		else if (f_ctl & FC_FC_SEQ_CTX)
			fc_exch_recv_resp(ema->mp, fp);
		else	/* no EX_CTX and no SEQ_CTX */
			fc_exch_recv_req(lport, ema->mp, fp);
		break;
	default:
		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)",
			     fr_eof(fp));
		fc_frame_free(fp);
	}
}
EXPORT_SYMBOL(fc_exch_recv);
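
/*
 * Illustrative sketch (not part of the original source): after a transport
 * driver has stripped its own encapsulation and initialized the fc_frame
 * (fr_dev, fr_sof/fr_eof and friends), it hands the frame to the exchange
 * layer with fc_exch_recv(). The function name "example_transport_recv" is
 * an assumption made for this sketch.
 */
#if 0	/* example only, not compiled */
static void example_transport_recv(struct fc_lport *lport, struct fc_frame *fp)
{
	fr_dev(fp) = lport;		/* associate the frame with the lport */
	fc_exch_recv(lport, fp);	/* dispatch to BLS/request/response paths */
}
#endif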

/**
 * fc_exch_init() - Initialize the exchange layer for a local port
 * @lport: The local port to initialize the exchange layer for
 */
int fc_exch_init(struct fc_lport *lport)
{
	if (!lport->tt.exch_mgr_reset)
		lport->tt.exch_mgr_reset = fc_exch_mgr_reset;

	return 0;
}
EXPORT_SYMBOL(fc_exch_init);

/**
 * fc_setup_exch_mgr() - Set up global exchange-layer resources
 *
 * Creates the exchange slab cache, computes fc_cpu_mask/fc_cpu_order and
 * creates the exchange workqueue shared by all exchange managers.
 */
int fc_setup_exch_mgr(void)
{
	fc_em_cachep = kmem_cache_create("libfc_em", sizeof(struct fc_exch),
					 0, SLAB_HWCACHE_ALIGN, NULL);
	if (!fc_em_cachep)
		return -ENOMEM;

	/*
	 * Initialize fc_cpu_mask and fc_cpu_order. The
	 * fc_cpu_mask is set for nr_cpu_ids rounded up
	 * to the next power of 2, and the order is stored
	 * in fc_cpu_order as this is later required in
	 * mapping between an exch id and exch array index
	 * in per cpu exch pool.
	 *
	 * This round up is required to align fc_cpu_mask
	 * to exchange id's lower bits such that all incoming
	 * frames of an exchange get delivered to the same
	 * cpu on which the exchange originated by a simple bitwise
	 * AND operation between fc_cpu_mask and exchange id.
	 */
	fc_cpu_order = ilog2(roundup_pow_of_two(nr_cpu_ids));
	fc_cpu_mask = (1 << fc_cpu_order) - 1;

	fc_exch_workqueue = create_singlethread_workqueue("fc_exch_workqueue");
	if (!fc_exch_workqueue)
		goto err;
	return 0;
err:
	kmem_cache_destroy(fc_em_cachep);
	return -ENOMEM;
}
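
/*
 * Illustrative sketch (not part of the original source) of the mapping
 * described in the comment above: the low fc_cpu_order bits of an exchange
 * ID select the per-cpu pool of the originating CPU, and the remaining bits
 * select the slot within that pool (the slot array immediately follows
 * struct fc_exch_pool in memory). The helper name "example_xid_to_pool" is
 * an assumption made for this sketch.
 */
#if 0	/* example only, not compiled */
static struct fc_exch_pool *example_xid_to_pool(struct fc_exch_mgr *mp,
						u16 xid, u16 *index)
{
	unsigned int cpu = xid & fc_cpu_mask;		/* originating cpu */

	*index = (xid - mp->min_xid) >> fc_cpu_order;	/* slot in that pool */
	return per_cpu_ptr(mp->pool, cpu);
}
#endif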

/**
 * fc_destroy_exch_mgr() - Free the global exchange-layer resources
 */
void fc_destroy_exch_mgr(void)
{
	destroy_workqueue(fc_exch_workqueue);
	kmem_cache_destroy(fc_em_cachep);
}