/*
 * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Maintained at www.Open-FCoE.org
 */

/*
 * RPORT GENERAL INFO
 *
 * This file contains all processing regarding fc_rports. It contains the
 * rport state machine and does all rport interaction with the transport class.
 * There should be no other places in libfc that interact directly with the
 * transport class in regards to adding and deleting rports.
 *
 * fc_rport's represent N_Port's within the fabric.
 */

/*
 * RPORT LOCKING
 *
 * The rport should never hold the rport mutex and then attempt to acquire
 * either the lport or disc mutexes. The rport's mutex is considered lesser
 * than both the lport's mutex and the disc mutex. Refer to fc_lport.c for
 * more comments on the hierarchy.
 *
 * The locking strategy is similar to the lport's strategy. The lock protects
 * the rport's states and is held and released by the entry points to the rport
 * block. All _enter_* functions correspond to rport states and expect the rport
 * mutex to be locked before calling them. This means that rports only handle
 * one request or response at a time; since they're not critical for the I/O
 * path, this potential over-use of the mutex is acceptable.
 */
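/*
 * Illustrative sketch of the ordering above, assuming a caller that starts
 * out holding neither lock: the disc mutex is taken first, the rport mutex
 * second, and the rport mutex is never held while trying to take the disc
 * (or lport) mutex.
 *
 *	mutex_lock(&lport->disc.disc_mutex);
 *	rdata = lport->tt.rport_lookup(lport, port_id);
 *	if (rdata)
 *		mutex_lock(&rdata->rp_mutex);
 *	mutex_unlock(&lport->disc.disc_mutex);
 *	if (rdata) {
 *		... work on the rport under rp_mutex ...
 *		mutex_unlock(&rdata->rp_mutex);
 *	}
 */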
#include <linux/kernel.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <asm/unaligned.h>

#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

struct workqueue_struct *rport_event_queue;
static void fc_rport_enter_plogi(struct fc_rport_priv *);
static void fc_rport_enter_prli(struct fc_rport_priv *);
static void fc_rport_enter_rtv(struct fc_rport_priv *);
static void fc_rport_enter_ready(struct fc_rport_priv *);
static void fc_rport_enter_logo(struct fc_rport_priv *);
static void fc_rport_enter_adisc(struct fc_rport_priv *);

static void fc_rport_recv_plogi_req(struct fc_lport *,
				    struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_prli_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_recv_logo_req(struct fc_lport *,
				   struct fc_seq *, struct fc_frame *);
static void fc_rport_timeout(struct work_struct *);
static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
static void fc_rport_work(struct work_struct *);
static const char *fc_rport_state_names[] = {
	[RPORT_ST_INIT] = "Init",
	[RPORT_ST_PLOGI] = "PLOGI",
	[RPORT_ST_PRLI] = "PRLI",
	[RPORT_ST_RTV] = "RTV",
	[RPORT_ST_READY] = "Ready",
	[RPORT_ST_LOGO] = "LOGO",
	[RPORT_ST_ADISC] = "ADISC",
	[RPORT_ST_DELETE] = "Delete",
	[RPORT_ST_RESTART] = "Restart",
};
/**
 * fc_rport_lookup() - lookup a remote port by port_id
 * @lport: Fibre Channel host port instance
 * @port_id: remote port port_id to match
 */
static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	list_for_each_entry(rdata, &lport->disc.rports, peers)
		if (rdata->ids.port_id == port_id)
			return rdata;
	return NULL;
}
/**
 * fc_rport_create() - Create a new remote port
 * @lport: The local port that the new remote port is for
 * @port_id: The port ID for the new remote port
 *
 * Locking note: must be called with the disc_mutex held.
 */
static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
					     u32 port_id)
{
	struct fc_rport_priv *rdata;

	rdata = lport->tt.rport_lookup(lport, port_id);
	if (rdata)
		return rdata;

	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
	if (!rdata)
		return NULL;

	rdata->ids.node_name = -1;
	rdata->ids.port_name = -1;
	rdata->ids.port_id = port_id;
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	kref_init(&rdata->kref);
	mutex_init(&rdata->rp_mutex);
	rdata->local_port = lport;
	rdata->rp_state = RPORT_ST_INIT;
	rdata->event = RPORT_EV_NONE;
	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
	rdata->e_d_tov = lport->e_d_tov;
	rdata->r_a_tov = lport->r_a_tov;
	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
	INIT_WORK(&rdata->event_work, fc_rport_work);
	if (port_id != FC_FID_DIR_SERV)
		list_add(&rdata->peers, &lport->disc.rports);
	return rdata;
}
/**
 * fc_rport_destroy() - free a remote port after last reference is released.
 * @kref: pointer to kref inside struct fc_rport_priv
 */
static void fc_rport_destroy(struct kref *kref)
{
	struct fc_rport_priv *rdata;

	rdata = container_of(kref, struct fc_rport_priv, kref);
	kfree(rdata);
}
/**
 * fc_rport_state() - return a string for the state the rport is in
 * @rdata: remote port private data
 */
static const char *fc_rport_state(struct fc_rport_priv *rdata)
{
	const char *cp;

	cp = fc_rport_state_names[rdata->rp_state];
	if (!cp)
		cp = "Unknown";
	return cp;
}
/**
 * fc_set_rport_loss_tmo() - Set the remote port loss timeout in seconds.
 * @rport: Pointer to Fibre Channel remote port structure
 * @timeout: timeout in seconds
 */
void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
{
	if (timeout)
		rport->dev_loss_tmo = timeout + 5;
	else
		rport->dev_loss_tmo = 30;
}
EXPORT_SYMBOL(fc_set_rport_loss_tmo);
/**
 * fc_plogi_get_maxframe() - Get max payload from the common service parameters
 * @flp: FLOGI payload structure
 * @maxval: upper limit, may be less than what is in the service parameters
 */
static unsigned int fc_plogi_get_maxframe(struct fc_els_flogi *flp,
					  unsigned int maxval)
{
	unsigned int mfs;

	/*
	 * Get max payload from the common service parameters and the
	 * class 3 receive data field size.
	 */
	mfs = ntohs(flp->fl_csp.sp_bb_data) & FC_SP_BB_DATA_MASK;
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	mfs = ntohs(flp->fl_cssp[3 - 1].cp_rdfs);
	if (mfs >= FC_SP_MIN_MAX_PAYLOAD && mfs < maxval)
		maxval = mfs;
	return maxval;
}
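/*
 * Worked example with assumed values: if the common service parameters
 * report a buffer-to-buffer data size of 2112 bytes, the class 3 receive
 * data field size is 1024 bytes and @maxval starts at 2048, the first test
 * leaves maxval at 2048 (2112 is not smaller) and the second lowers it to
 * 1024, which is what fc_plogi_get_maxframe() then returns.
 */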
/**
 * fc_rport_state_enter() - Change the rport's state
 * @rdata: The rport whose state should change
 * @new: The new state of the rport
 *
 * Locking Note: Called with the rport lock held
 */
static void fc_rport_state_enter(struct fc_rport_priv *rdata,
				 enum fc_rport_state new)
{
	if (rdata->rp_state != new)
		rdata->retries = 0;
	rdata->rp_state = new;
}
/**
 * fc_rport_work() - Handler for remote port events in the rport_event_queue
 * @work: Handle to the remote port being dequeued
 */
static void fc_rport_work(struct work_struct *work)
{
	u32 port_id;
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, event_work);
	struct fc_rport_libfc_priv *rp;
	enum fc_rport_event event;
	struct fc_lport *lport = rdata->local_port;
	struct fc_rport_operations *rport_ops;
	struct fc_rport_identifiers ids;
	struct fc_rport *rport;
	int restart = 0;

	mutex_lock(&rdata->rp_mutex);
	event = rdata->event;
	rport_ops = rdata->ops;
	rport = rdata->rport;

	FC_RPORT_DBG(rdata, "work event %u\n", event);

	switch (event) {
	case RPORT_EV_READY:
		ids = rdata->ids;
		rdata->event = RPORT_EV_NONE;
		kref_get(&rdata->kref);
		mutex_unlock(&rdata->rp_mutex);

		if (!rport)
			rport = fc_remote_port_add(lport->host, 0, &ids);
		if (!rport) {
			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
			lport->tt.rport_logoff(rdata);
			kref_put(&rdata->kref, lport->tt.rport_destroy);
			return;
		}
		mutex_lock(&rdata->rp_mutex);
		if (rdata->rport)
			FC_RPORT_DBG(rdata, "rport already allocated\n");
		rdata->rport = rport;
		rport->maxframe_size = rdata->maxframe_size;
		rport->supported_classes = rdata->supported_classes;

		rp = rport->dd_data;
		rp->local_port = lport;
		rp->rp_state = rdata->rp_state;
		rp->flags = rdata->flags;
		rp->e_d_tov = rdata->e_d_tov;
		rp->r_a_tov = rdata->r_a_tov;
		mutex_unlock(&rdata->rp_mutex);

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	case RPORT_EV_FAILED:
	case RPORT_EV_LOGO:
	case RPORT_EV_STOP:
		port_id = rdata->ids.port_id;
		mutex_unlock(&rdata->rp_mutex);

		if (port_id != FC_FID_DIR_SERV) {
			/*
			 * We must drop rp_mutex before taking disc_mutex.
			 * Re-evaluate state to allow for restart.
			 * A transition to RESTART state must only happen
			 * while disc_mutex is held and rdata is on the list.
			 */
			mutex_lock(&lport->disc.disc_mutex);
			mutex_lock(&rdata->rp_mutex);
			if (rdata->rp_state == RPORT_ST_RESTART)
				restart = 1;
			else
				list_del(&rdata->peers);
			rdata->event = RPORT_EV_NONE;
			mutex_unlock(&rdata->rp_mutex);
			mutex_unlock(&lport->disc.disc_mutex);
		}

		if (rport_ops && rport_ops->event_callback) {
			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
			rport_ops->event_callback(lport, rdata, event);
		}
		cancel_delayed_work_sync(&rdata->retry_work);

		/*
		 * Reset any outstanding exchanges before freeing rport.
		 */
		lport->tt.exch_mgr_reset(lport, 0, port_id);
		lport->tt.exch_mgr_reset(lport, port_id, 0);

		if (rport) {
			rp = rport->dd_data;
			rp->rp_state = RPORT_ST_DELETE;
			mutex_lock(&rdata->rp_mutex);
			rdata->rport = NULL;
			mutex_unlock(&rdata->rp_mutex);
			fc_remote_port_delete(rport);
		}
		if (restart) {
			mutex_lock(&rdata->rp_mutex);
			FC_RPORT_DBG(rdata, "work restart\n");
			fc_rport_enter_plogi(rdata);
			mutex_unlock(&rdata->rp_mutex);
		} else
			kref_put(&rdata->kref, lport->tt.rport_destroy);
		break;

	default:
		mutex_unlock(&rdata->rp_mutex);
		break;
	}
}
/**
 * fc_rport_login() - Start the remote port login state machine
 * @rdata: private remote port
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 *
 * This indicates the intent to be logged into the remote port.
 * If it appears we are already logged in, ADISC is used to verify
 * the setup.
 */
int fc_rport_login(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_READY:
		FC_RPORT_DBG(rdata, "ADISC port\n");
		fc_rport_enter_adisc(rdata);
		break;
	case RPORT_ST_RESTART:
		break;
	case RPORT_ST_DELETE:
		FC_RPORT_DBG(rdata, "Restart deleted port\n");
		fc_rport_state_enter(rdata, RPORT_ST_RESTART);
		break;
	default:
		FC_RPORT_DBG(rdata, "Login to port\n");
		fc_rport_enter_plogi(rdata);
		break;
	}
	mutex_unlock(&rdata->rp_mutex);

	return 0;
}
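/*
 * Typical caller sketch, assuming discovery code that has just created the
 * remote port under the disc mutex and now wants to start the login state
 * machine through the transport template:
 *
 *	rdata = lport->tt.rport_create(lport, port_id);
 *	if (rdata)
 *		lport->tt.rport_login(rdata);
 */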
/**
 * fc_rport_enter_delete() - schedule a remote port to be deleted.
 * @rdata: private remote port
 * @event: event to report as the reason for deletion
 *
 * Locking Note: Called with the rport lock held.
 *
 * Allow state change into DELETE only once.
 *
 * Call queue_work only if there's no event already pending.
 * Set the new event so that the old pending event will not occur.
 * Since we have the mutex, even if fc_rport_work() is already started,
 * it'll see the new event.
 */
static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
				  enum fc_rport_event event)
{
	if (rdata->rp_state == RPORT_ST_DELETE)
		return;

	FC_RPORT_DBG(rdata, "Delete port\n");

	fc_rport_state_enter(rdata, RPORT_ST_DELETE);

	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = event;
}
/**
 * fc_rport_logoff() - Logoff and remove an rport
 * @rdata: private remote port
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
int fc_rport_logoff(struct fc_rport_priv *rdata)
{
	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Remove port\n");

	if (rdata->rp_state == RPORT_ST_DELETE) {
		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
		goto out;
	}

	if (rdata->rp_state == RPORT_ST_RESTART)
		FC_RPORT_DBG(rdata, "Port in Restart state, deleting\n");
	else
		fc_rport_enter_logo(rdata);

	/*
	 * Change the state to Delete so that we discard
	 * the response.
	 */
	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
out:
	mutex_unlock(&rdata->rp_mutex);
	return 0;
}
/**
 * fc_rport_enter_ready() - The rport is ready
 * @rdata: private remote port
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
{
	fc_rport_state_enter(rdata, RPORT_ST_READY);

	FC_RPORT_DBG(rdata, "Port is Ready\n");

	if (rdata->event == RPORT_EV_NONE)
		queue_work(rport_event_queue, &rdata->event_work);
	rdata->event = RPORT_EV_READY;
}
/**
 * fc_rport_timeout() - Handler for the retry_work timer.
 * @work: The work struct of the fc_rport_priv
 *
 * Locking Note: Called without the rport lock held. This
 * function will hold the rport lock, call an _enter_*
 * function and then unlock the rport.
 */
static void fc_rport_timeout(struct work_struct *work)
{
	struct fc_rport_priv *rdata =
		container_of(work, struct fc_rport_priv, retry_work.work);

	mutex_lock(&rdata->rp_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
		fc_rport_enter_plogi(rdata);
		break;
	case RPORT_ST_PRLI:
		fc_rport_enter_prli(rdata);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_rtv(rdata);
		break;
	case RPORT_ST_LOGO:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_ADISC:
		fc_rport_enter_adisc(rdata);
		break;
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
	case RPORT_ST_DELETE:
	case RPORT_ST_RESTART:
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
}
/**
 * fc_rport_error() - Error handler, called once retries have been exhausted
 * @rdata: private remote port
 * @fp: The frame pointer
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
{
	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
		     fc_rport_state(rdata), rdata->retries);

	switch (rdata->rp_state) {
	case RPORT_ST_PLOGI:
	case RPORT_ST_LOGO:
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
		break;
	case RPORT_ST_RTV:
		fc_rport_enter_ready(rdata);
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_ADISC:
		fc_rport_enter_logo(rdata);
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_RESTART:
	case RPORT_ST_READY:
	case RPORT_ST_INIT:
		break;
	}
}
/**
 * fc_rport_error_retry() - Error handler when retries are desired
 * @rdata: private remote port data
 * @fp: The frame pointer
 *
 * If the error was an exchange timeout retry immediately,
 * otherwise wait for E_D_TOV.
 *
 * Locking Note: The rport lock is expected to be held before
 * calling this routine
 */
static void fc_rport_error_retry(struct fc_rport_priv *rdata,
				 struct fc_frame *fp)
{
	unsigned long delay = FC_DEF_E_D_TOV;

	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
	if (PTR_ERR(fp) == -FC_EX_CLOSED)
		return fc_rport_error(rdata, fp);

	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
			     PTR_ERR(fp), fc_rport_state(rdata));
		rdata->retries++;
		/* no additional delay on exchange timeouts */
		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
			delay = 0;
		schedule_delayed_work(&rdata->retry_work, delay);
		return;
	}

	return fc_rport_error(rdata, fp);
}
/**
 * fc_rport_plogi_resp() - Handle incoming ELS PLOGI response
 * @sp: current sequence in the PLOGI exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_lport *lport = rdata->local_port;
	struct fc_els_flogi *plp = NULL;
	unsigned int tov;
	u16 csp_seq;
	u16 cssp_seq;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_PLOGI) {
		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC &&
	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);

		tov = ntohl(plp->fl_csp.sp_e_d_tov);
		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
			tov /= 1000;
		if (tov > rdata->e_d_tov)
			rdata->e_d_tov = tov;
		csp_seq = ntohs(plp->fl_csp.sp_tot_seq);
		cssp_seq = ntohs(plp->fl_cssp[3 - 1].cp_con_seq);
		if (cssp_seq < csp_seq)
			csp_seq = cssp_seq;
		rdata->max_seq = csp_seq;
		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
		fc_rport_enter_prli(rdata);
	} else
		fc_rport_error_retry(rdata, fp);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
/**
 * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PLOGI);

	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}
	rdata->e_d_tov = lport->e_d_tov;

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
				  fc_rport_plogi_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}
/**
 * fc_rport_prli_resp() - Process Login (PRLI) response handler
 * @sp: current sequence in the PRLI exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	u32 fcp_parm = 0;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_PRLI) {
		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	/* reinitialize remote port roles */
	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		pp = fc_frame_payload_get(fp, sizeof(*pp));
		if (pp && pp->prli.prli_spp_len >= sizeof(pp->spp)) {
			fcp_parm = ntohl(pp->spp.spp_params);
			if (fcp_parm & FCP_SPPF_RETRY)
				rdata->flags |= FC_RP_FLAGS_RETRY;
		}

		rdata->supported_classes = FC_COS_CLASS3;
		if (fcp_parm & FCP_SPPF_INIT_FCN)
			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
		if (fcp_parm & FCP_SPPF_TARG_FCN)
			roles |= FC_RPORT_ROLE_FCP_TARGET;

		rdata->ids.roles = roles;
		fc_rport_enter_rtv(rdata);

	} else {
		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
	}

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
/**
 * fc_rport_logo_resp() - Logout (LOGO) response handler
 * @sp: current sequence in the LOGO exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
			       void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_LOGO) {
		FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error_retry(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op != ELS_LS_ACC)
		FC_RPORT_DBG(rdata, "Bad ELS response op %x for LOGO command\n",
			     op);
	fc_rport_enter_delete(rdata, RPORT_EV_LOGO);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
/**
 * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_frame *fp;

	/*
	 * If the rport is one of the well known addresses
	 * we skip PRLI and RTV and go straight to READY.
	 */
	if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
		fc_rport_enter_ready(rdata);
		return;
	}

	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_PRLI);

	fp = fc_frame_alloc(lport, sizeof(*pp));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
				  fc_rport_prli_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}
/**
 * fc_rport_rtv_resp() - Request Timeout Value (RTV) response handler
 * @sp: current sequence in the RTV exchange
 * @fp: response frame
 * @rdata_arg: private remote port data
 *
 * Many targets don't seem to support this.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
			      void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));

	if (rdata->rp_state != RPORT_ST_RTV) {
		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
			     "%s\n", fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	op = fc_frame_payload_op(fp);
	if (op == ELS_LS_ACC) {
		struct fc_els_rtv_acc *rtv;
		u32 toq;
		u32 tov;

		rtv = fc_frame_payload_get(fp, sizeof(*rtv));
		if (rtv) {
			toq = ntohl(rtv->rtv_toq);
			tov = ntohl(rtv->rtv_r_a_tov);
			if (tov == 0)
				tov = 1;
			rdata->r_a_tov = tov;
			tov = ntohl(rtv->rtv_e_d_tov);
			if (toq & FC_ELS_RTV_EDRES)
				tov /= 1000000;
			if (tov == 0)
				tov = 1;
			rdata->e_d_tov = tov;
		}
	}

	fc_rport_enter_ready(rdata);

out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
/**
 * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
{
	struct fc_frame *fp;
	struct fc_lport *lport = rdata->local_port;

	FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_RTV);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
				  fc_rport_rtv_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}
/**
 * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
 * @rdata: private remote port data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_LOGO);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}

	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
				  fc_rport_logo_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}
/**
 * fc_rport_adisc_resp() - Address Discovery (ADISC) response handler
 * @sp: current sequence in the ADISC exchange
 * @fp: response frame
 * @rdata_arg: remote port private.
 *
 * Locking Note: This function will be called without the rport lock
 * held, but it will lock, call an _enter_* function or fc_rport_error
 * and then unlock the rport.
 */
static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
				void *rdata_arg)
{
	struct fc_rport_priv *rdata = rdata_arg;
	struct fc_els_adisc *adisc;
	u8 op;

	mutex_lock(&rdata->rp_mutex);

	FC_RPORT_DBG(rdata, "Received a ADISC response\n");

	if (rdata->rp_state != RPORT_ST_ADISC) {
		FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
			     fc_rport_state(rdata));
		if (IS_ERR(fp))
			goto err;
		goto out;
	}

	if (IS_ERR(fp)) {
		fc_rport_error(rdata, fp);
		goto err;
	}

	/*
	 * If address verification failed, consider us logged out of the rport.
	 * Since the rport is still in discovery, we want to be
	 * logged in, so go to PLOGI state. Otherwise, go back to READY.
	 */
	op = fc_frame_payload_op(fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	if (op != ELS_LS_ACC || !adisc ||
	    ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
	    get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
	    get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
		FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
		fc_rport_enter_plogi(rdata);
	} else {
		FC_RPORT_DBG(rdata, "ADISC OK\n");
		fc_rport_enter_ready(rdata);
	}
out:
	fc_frame_free(fp);
err:
	mutex_unlock(&rdata->rp_mutex);
	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
}
/**
 * fc_rport_enter_adisc() - Send Address Discover (ADISC) request to peer
 * @rdata: remote port private data
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this routine.
 */
static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;

	FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
		     fc_rport_state(rdata));

	fc_rport_state_enter(rdata, RPORT_ST_ADISC);

	fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
	if (!fp) {
		fc_rport_error_retry(rdata, fp);
		return;
	}
	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
				  fc_rport_adisc_resp, rdata, lport->e_d_tov))
		fc_rport_error_retry(rdata, NULL);
	else
		kref_get(&rdata->kref);
}
/**
 * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request
 * @rdata: remote port private
 * @sp: current sequence in the ADISC exchange
 * @in_fp: ADISC request frame
 *
 * Locking Note: Called with the lport and rport locks held.
 */
static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
				    struct fc_seq *sp, struct fc_frame *in_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_frame *fp;
	struct fc_exch *ep = fc_seq_exch(sp);
	struct fc_els_adisc *adisc;
	struct fc_seq_els_data rjt_data;
	u32 f_ctl;

	FC_RPORT_DBG(rdata, "Received ADISC request\n");

	adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
	if (!adisc) {
		rjt_data.fp = NULL;
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
		goto drop;
	}

	fp = fc_frame_alloc(lport, sizeof(*adisc));
	if (!fp)
		goto drop;
	fc_adisc_fill(lport, fp);
	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
	adisc->adisc_cmd = ELS_LS_ACC;
	sp = lport->tt.seq_start_next(sp);
	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
		       FC_TYPE_ELS, f_ctl, 0);
	lport->tt.seq_send(lport, sp, fp);
drop:
	fc_frame_free(in_fp);
}
/**
 * fc_rport_recv_els_req() - handle a validated ELS request.
 * @lport: Fibre Channel local port
 * @sp: current sequence in the request exchange
 * @fp: ELS request frame
 *
 * Handle incoming ELS requests that require port login.
 * The ELS opcode has already been validated by the caller.
 *
 * Locking Note: Called with the lport lock held.
 */
static void fc_rport_recv_els_req(struct fc_lport *lport,
				  struct fc_seq *sp, struct fc_frame *fp)
{
	struct fc_rport_priv *rdata;
	struct fc_frame_header *fh;
	struct fc_seq_els_data els_data;

	els_data.fp = NULL;
	els_data.reason = ELS_RJT_UNAB;
	els_data.explan = ELS_EXPL_PLOGI_REQD;

	fh = fc_frame_header_get(fp);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id));
	if (!rdata) {
		mutex_unlock(&lport->disc.disc_mutex);
		goto reject;
	}
	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&lport->disc.disc_mutex);

	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		break;
	default:
		mutex_unlock(&rdata->rp_mutex);
		goto reject;
	}

	switch (fc_frame_payload_op(fp)) {
	case ELS_PRLI:
		fc_rport_recv_prli_req(rdata, sp, fp);
		break;
	case ELS_PRLO:
		fc_rport_recv_prlo_req(rdata, sp, fp);
		break;
	case ELS_ADISC:
		fc_rport_recv_adisc_req(rdata, sp, fp);
		break;
	case ELS_RRQ:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
		break;
	case ELS_REC:
		els_data.fp = fp;
		lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
		break;
	default:
		fc_frame_free(fp);	/* can't happen */
		break;
	}

	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
	fc_frame_free(fp);
}
/**
 * fc_rport_recv_req() - Handle a received ELS request from a rport
 * @sp: current sequence in the request exchange
 * @fp: received ELS request frame
 * @lport: Fibre Channel local port
 *
 * Locking Note: Called with the lport lock held.
 */
void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
		       struct fc_lport *lport)
{
	struct fc_seq_els_data els_data;

	/*
	 * Handle PLOGI and LOGO requests separately, since they
	 * don't require prior login.
	 * Check for unsupported opcodes first and reject them.
	 * For some ops, it would be incorrect to reject with "PLOGI required".
	 */
	switch (fc_frame_payload_op(fp)) {
	case ELS_PLOGI:
		fc_rport_recv_plogi_req(lport, sp, fp);
		break;
	case ELS_LOGO:
		fc_rport_recv_logo_req(lport, sp, fp);
		break;
	case ELS_PRLI:
	case ELS_PRLO:
	case ELS_ADISC:
	case ELS_RRQ:
	case ELS_REC:
		fc_rport_recv_els_req(lport, sp, fp);
		break;
	default:
		fc_frame_free(fp);
		els_data.fp = NULL;
		els_data.reason = ELS_RJT_UNSUP;
		els_data.explan = ELS_EXPL_NONE;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
		break;
	}
}
/**
 * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
 * @lport: local port
 * @sp: current sequence in the PLOGI exchange
 * @rx_fp: PLOGI request frame
 *
 * Locking Note: The rport lock is not held when this function is called;
 * it takes the disc and rport locks itself as needed.
 */
static void fc_rport_recv_plogi_req(struct fc_lport *lport,
				    struct fc_seq *sp, struct fc_frame *rx_fp)
{
	struct fc_disc *disc;
	struct fc_rport_priv *rdata;
	struct fc_frame *fp = rx_fp;
	struct fc_exch *ep;
	struct fc_frame_header *fh;
	struct fc_els_flogi *pl;
	struct fc_seq_els_data rjt_data;
	u32 sid, f_ctl;

	rjt_data.fp = NULL;
	fh = fc_frame_header_get(fp);
	sid = ntoh24(fh->fh_s_id);

	FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");

	pl = fc_frame_payload_get(fp, sizeof(*pl));
	if (!pl) {
		FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
		rjt_data.reason = ELS_RJT_PROT;
		rjt_data.explan = ELS_EXPL_INV_LEN;
		goto reject;
	}

	disc = &lport->disc;
	mutex_lock(&disc->disc_mutex);
	rdata = lport->tt.rport_create(lport, sid);
	if (!rdata) {
		mutex_unlock(&disc->disc_mutex);
		rjt_data.reason = ELS_RJT_UNAB;
		rjt_data.explan = ELS_EXPL_INSUF_RES;
		goto reject;
	}

	mutex_lock(&rdata->rp_mutex);
	mutex_unlock(&disc->disc_mutex);

	rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
	rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);

	/*
	 * If the rport was just created, possibly due to the incoming PLOGI,
	 * set the state appropriately and accept the PLOGI.
	 *
	 * If we had also sent a PLOGI, and if the received PLOGI is from a
	 * higher WWPN, we accept it, otherwise an LS_RJT is sent with reason
	 * "command already in progress".
	 *
	 * XXX TBD: If the session was ready before, the PLOGI should result in
	 * all outstanding exchanges being reset.
	 */
	switch (rdata->rp_state) {
	case RPORT_ST_INIT:
		FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
		break;
	case RPORT_ST_PLOGI:
		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
		if (rdata->ids.port_name < lport->wwpn) {
			mutex_unlock(&rdata->rp_mutex);
			rjt_data.reason = ELS_RJT_INPROG;
			rjt_data.explan = ELS_EXPL_NONE;
			goto reject;
		}
		break;
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
			     "- ignored for now\n", rdata->rp_state);
		/* XXX TBD - should reset */
		break;
	case RPORT_ST_DELETE:
	case RPORT_ST_LOGO:
	case RPORT_ST_RESTART:
		FC_RPORT_DBG(rdata, "Received PLOGI in state %s - send busy\n",
			     fc_rport_state(rdata));
		mutex_unlock(&rdata->rp_mutex);
		rjt_data.reason = ELS_RJT_BUSY;
		rjt_data.explan = ELS_EXPL_NONE;
		goto reject;
	}

	/*
	 * Get session payload size from incoming PLOGI.
	 */
	rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
	fc_frame_free(rx_fp);

	/*
	 * Send LS_ACC. If this fails, the originator should retry.
	 */
	sp = lport->tt.seq_start_next(sp);
	if (!sp)
		goto out;
	fp = fc_frame_alloc(lport, sizeof(*pl));
	if (!fp)
		goto out;

	fc_plogi_fill(lport, fp, ELS_LS_ACC);
	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
	ep = fc_seq_exch(sp);
	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
		       FC_TYPE_ELS, f_ctl, 0);
	lport->tt.seq_send(lport, sp, fp);
	fc_rport_enter_prli(rdata);
out:
	mutex_unlock(&rdata->rp_mutex);
	return;

reject:
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}
/**
 * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
 * @rdata: private remote port data
 * @sp: current sequence in the PRLI exchange
 * @rx_fp: PRLI request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
				   struct fc_seq *sp, struct fc_frame *rx_fp)
{
	struct fc_lport *lport = rdata->local_port;
	struct fc_exch *ep;
	struct fc_frame *fp;
	struct fc_frame_header *fh;
	struct {
		struct fc_els_prli prli;
		struct fc_els_spp spp;
	} *pp;
	struct fc_els_spp *rspp;	/* request service param page */
	struct fc_els_spp *spp;		/* response spp */
	unsigned int len;
	unsigned int plen;
	enum fc_els_rjt_reason reason = ELS_RJT_UNAB;
	enum fc_els_rjt_explan explan = ELS_EXPL_NONE;
	enum fc_els_spp_resp resp;
	struct fc_seq_els_data rjt_data;
	u32 f_ctl;
	u32 fcp_parm;
	u32 roles = FC_RPORT_ROLE_UNKNOWN;
	rjt_data.fp = NULL;

	fh = fc_frame_header_get(rx_fp);

	FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
		     fc_rport_state(rdata));

	switch (rdata->rp_state) {
	case RPORT_ST_PRLI:
	case RPORT_ST_RTV:
	case RPORT_ST_READY:
	case RPORT_ST_ADISC:
		reason = ELS_RJT_NONE;
		break;
	default:
		fc_frame_free(rx_fp);
		return;
	}
	len = fr_len(rx_fp) - sizeof(*fh);
	pp = fc_frame_payload_get(rx_fp, sizeof(*pp));
	if (pp == NULL) {
		reason = ELS_RJT_PROT;
		explan = ELS_EXPL_INV_LEN;
	} else {
		plen = ntohs(pp->prli.prli_len);
		if ((plen % 4) != 0 || plen > len) {
			reason = ELS_RJT_PROT;
			explan = ELS_EXPL_INV_LEN;
		} else if (plen < len) {
			len = plen;
		}
		plen = pp->prli.prli_spp_len;
		if ((plen % 4) != 0 || plen < sizeof(*spp) ||
		    plen > len || len < sizeof(*pp)) {
			reason = ELS_RJT_PROT;
			explan = ELS_EXPL_INV_LEN;
		}
		rspp = &pp->spp;
	}
	if (reason != ELS_RJT_NONE ||
	    (fp = fc_frame_alloc(lport, len)) == NULL) {
		rjt_data.reason = reason;
		rjt_data.explan = explan;
		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	} else {
		sp = lport->tt.seq_start_next(sp);
		WARN_ON(!sp);
		pp = fc_frame_payload_get(fp, len);
		WARN_ON(!pp);
		memset(pp, 0, len);
		pp->prli.prli_cmd = ELS_LS_ACC;
		pp->prli.prli_spp_len = plen;
		pp->prli.prli_len = htons(len);
		len -= sizeof(struct fc_els_prli);

		/* reinitialize remote port roles */
		rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;

		/*
		 * Go through all the service parameter pages and build
		 * response. If plen indicates longer SPP than standard,
		 * use that. The entire response has been pre-cleared above.
		 */
		spp = &pp->spp;
		while (len >= plen) {
			spp->spp_type = rspp->spp_type;
			spp->spp_type_ext = rspp->spp_type_ext;
			spp->spp_flags = rspp->spp_flags & FC_SPP_EST_IMG_PAIR;
			resp = FC_SPP_RESP_ACK;
			if (rspp->spp_flags & FC_SPP_RPA_VAL)
				resp = FC_SPP_RESP_NO_PA;
			switch (rspp->spp_type) {
			case 0:	/* common to all FC-4 types */
				break;
			case FC_TYPE_FCP:
				fcp_parm = ntohl(rspp->spp_params);
				if (fcp_parm & FCP_SPPF_RETRY)
					rdata->flags |= FC_RP_FLAGS_RETRY;
				rdata->supported_classes = FC_COS_CLASS3;
				if (fcp_parm & FCP_SPPF_INIT_FCN)
					roles |= FC_RPORT_ROLE_FCP_INITIATOR;
				if (fcp_parm & FCP_SPPF_TARG_FCN)
					roles |= FC_RPORT_ROLE_FCP_TARGET;
				rdata->ids.roles = roles;

				spp->spp_params =
					htonl(lport->service_params);
				break;
			default:
				resp = FC_SPP_RESP_INVL;
				break;
			}
			spp->spp_flags |= resp;
			len -= plen;
			rspp = (struct fc_els_spp *)((char *)rspp + plen);
			spp = (struct fc_els_spp *)((char *)spp + plen);
		}

		/*
		 * Send LS_ACC. If this fails, the originator should retry.
		 */
		f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
		f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
		ep = fc_seq_exch(sp);
		fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
			       FC_TYPE_ELS, f_ctl, 0);
		lport->tt.seq_send(lport, sp, fp);

		/*
		 * Get lock and re-check state.
		 */
		switch (rdata->rp_state) {
		case RPORT_ST_PRLI:
			fc_rport_enter_ready(rdata);
			break;
		case RPORT_ST_READY:
		case RPORT_ST_ADISC:
			break;
		default:
			break;
		}
	}
	fc_frame_free(rx_fp);
}
/**
 * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
 * @rdata: private remote port data
 * @sp: current sequence in the PRLO exchange
 * @fp: PRLO request frame
 *
 * Locking Note: The rport lock is expected to be held before calling
 * this function.
 */
static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
				   struct fc_seq *sp,
				   struct fc_frame *fp)
{
	struct fc_lport *lport = rdata->local_port;

	struct fc_frame_header *fh;
	struct fc_seq_els_data rjt_data;

	fh = fc_frame_header_get(fp);

	FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
		     fc_rport_state(rdata));

	rjt_data.fp = NULL;
	rjt_data.reason = ELS_RJT_UNAB;
	rjt_data.explan = ELS_EXPL_NONE;
	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
	fc_frame_free(fp);
}
/**
 * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
 * @lport: local port.
 * @sp: current sequence in the LOGO exchange
 * @fp: LOGO request frame
 *
 * Locking Note: The rport lock is not held when this function is called;
 * it takes the disc and rport locks itself as needed.
 */
static void fc_rport_recv_logo_req(struct fc_lport *lport,
				   struct fc_seq *sp,
				   struct fc_frame *fp)
{
	struct fc_frame_header *fh;
	struct fc_rport_priv *rdata;
	u32 sid;

	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);

	fh = fc_frame_header_get(fp);
	sid = ntoh24(fh->fh_s_id);

	mutex_lock(&lport->disc.disc_mutex);
	rdata = lport->tt.rport_lookup(lport, sid);
	if (rdata) {
		mutex_lock(&rdata->rp_mutex);
		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
			     fc_rport_state(rdata));

		fc_rport_enter_delete(rdata, RPORT_EV_LOGO);

		/*
		 * If the remote port was created due to discovery, set state
		 * to log back in. It may have seen a stale RSCN about us.
		 */
		if (rdata->disc_id)
			fc_rport_state_enter(rdata, RPORT_ST_RESTART);
		mutex_unlock(&rdata->rp_mutex);
	} else
		FC_RPORT_ID_DBG(lport, sid,
				"Received LOGO from non-logged-in port\n");
	mutex_unlock(&lport->disc.disc_mutex);
	fc_frame_free(fp);
}
/**
 * fc_rport_flush_queue() - Flush the rport_event_queue
 */
static void fc_rport_flush_queue(void)
{
	flush_workqueue(rport_event_queue);
}

/**
 * fc_rport_init() - Fill in the remote port handlers of a transport template
 * @lport: local port whose template should be initialized
 *
 * Entries the LLD has already set are left alone; the remaining ones get
 * the default libfc handlers.
 */
int fc_rport_init(struct fc_lport *lport)
{
	if (!lport->tt.rport_lookup)
		lport->tt.rport_lookup = fc_rport_lookup;

	if (!lport->tt.rport_create)
		lport->tt.rport_create = fc_rport_create;

	if (!lport->tt.rport_login)
		lport->tt.rport_login = fc_rport_login;

	if (!lport->tt.rport_logoff)
		lport->tt.rport_logoff = fc_rport_logoff;

	if (!lport->tt.rport_recv_req)
		lport->tt.rport_recv_req = fc_rport_recv_req;

	if (!lport->tt.rport_flush_queue)
		lport->tt.rport_flush_queue = fc_rport_flush_queue;

	if (!lport->tt.rport_destroy)
		lport->tt.rport_destroy = fc_rport_destroy;

	return 0;
}
EXPORT_SYMBOL(fc_rport_init);
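/*
 * Usage sketch, assuming LLD initialization code: a driver may install its
 * own handlers in the transport template before calling fc_rport_init(),
 * which then only fills in the entries that are still NULL.
 *
 *	lport->tt.rport_create = my_rport_create;	(hypothetical override)
 *	fc_rport_init(lport);
 */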
/**
 * fc_setup_rport() - Allocate the remote port event workqueue
 */
int fc_setup_rport(void)
{
	rport_event_queue = create_singlethread_workqueue("fc_rport_eq");
	if (!rport_event_queue)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(fc_setup_rport);

/**
 * fc_destroy_rport() - Free the remote port event workqueue
 */
void fc_destroy_rport(void)
{
	destroy_workqueue(rport_event_queue);
}
EXPORT_SYMBOL(fc_destroy_rport);

/**
 * fc_rport_terminate_io() - Reset all outstanding exchanges for a remote port
 * @rport: remote port whose exchanges should be reset
 */
void fc_rport_terminate_io(struct fc_rport *rport)
{
	struct fc_rport_libfc_priv *rp = rport->dd_data;
	struct fc_lport *lport = rp->local_port;

	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
}
EXPORT_SYMBOL(fc_rport_terminate_io);