/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <bfa.h>
#include <bfa_svc.h>
#include <cs/bfa_debug.h>
#include <bfi/bfi_rport.h>
#include "bfa_intr_priv.h"

BFA_TRC_FILE(HAL, RPORT);
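/**
 * Deliver the rport online/offline notification to the driver: call the
 * FCS callback directly when an FCS instance is attached, otherwise defer
 * the notification through the HCB callback queue.
 */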
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)

#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)
/*
 * forward declarations
 */
static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
static void bfa_rport_free(struct bfa_rport_s *rport);
static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
static void __bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete);
/**
 *  bfa_rport_sm BFA rport state machine
 */

enum bfa_rport_event {
	BFA_RPORT_SM_CREATE	= 1,	/* rport create event */
	BFA_RPORT_SM_DELETE	= 2,	/* deleting an existing rport */
	BFA_RPORT_SM_ONLINE	= 3,	/* rport is online */
	BFA_RPORT_SM_OFFLINE	= 4,	/* rport is offline */
	BFA_RPORT_SM_FWRSP	= 5,	/* firmware response */
	BFA_RPORT_SM_HWFAIL	= 6,	/* IOC h/w failure */
	BFA_RPORT_SM_QOS_SCN	= 7,	/* QoS SCN from firmware */
	BFA_RPORT_SM_SET_SPEED	= 8,	/* Set Rport Speed */
	BFA_RPORT_SM_QRESUME	= 9,	/* space in requeue queue */
};
static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_created(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_online(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
				 enum bfa_rport_event event);
/**
 * Beginning state, only online event expected.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		bfa_stats(rp, sm_un_unexp);
		break;
	}
}
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		break;
	}
}
/**
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		break;
	}
}
/**
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		break;
	}
}
/**
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		qos_scn->old_qos_attr.qos_flow_id =
			bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id =
			bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
		qos_scn->old_qos_attr.qos_priority =
			bfa_os_ntohl(qos_scn->old_qos_attr.qos_priority);
		qos_scn->new_qos_attr.qos_priority =
			bfa_os_ntohl(qos_scn->new_qos_attr.qos_priority);

		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						qos_scn->old_qos_attr,
						qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						qos_scn->old_qos_attr,
						qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		break;
	}
}
/**
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		break;
	}
}
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		break;
	}
}
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		break;
	}
}
/**
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		break;
	}
}
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		break;
	}
}
/**
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		break;
	}
}
/**
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		break;
	}
}
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		break;
	}
}
/**
 *  bfa_rport_private BFA rport private functions
 */

static void
__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_rport_s *rp = cbarg;

	if (complete)
		bfa_cb_rport_online(rp->rport_drv);
}
static void
__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_rport_s *rp = cbarg;

	if (complete)
		bfa_cb_rport_offline(rp->rport_drv);
}
static void
bfa_rport_qresume(void *cbarg)
{
	struct bfa_rport_s *rp = cbarg;

	bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
}
void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
}
void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rports = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	bfa_assert(mod->num_rports
		   && !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * rport tag 0 is reserved and is not put on the free queue
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume the memory claimed for the rport array
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
void
bfa_rport_initdone(struct bfa_s *bfa)
{
}

void
bfa_rport_detach(struct bfa_s *bfa)
{
}

void
bfa_rport_start(struct bfa_s *bfa)
{
}

void
bfa_rport_stop(struct bfa_s *bfa)
{
}
void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
static struct bfa_rport_s *
bfa_rport_alloc(struct bfa_rport_mod_s *mod)
{
	struct bfa_rport_s *rport;

	bfa_q_deq(&mod->rp_free_q, &rport);
	if (rport)
		list_add_tail(&rport->qe, &mod->rp_active_q);

	return rport;
}
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
/**
 * Rport interrupt processing.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		break;
	}
}
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	bfa_rport_clear_stats(rp);

	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}
void
bfa_rport_delete(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
}
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	bfa_assert(rport_info->max_frmsz != 0);

	/**
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	bfa_os_assign(rport->rport_info, *rport_info);
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
void
bfa_rport_offline(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
}
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_pport_speed speed)
{
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PPORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
void
bfa_rport_get_stats(struct bfa_rport_s *rport,
			struct bfa_rport_hal_stats_s *stats)
{
	*stats = rport->stats;
}
void
bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
			struct bfa_rport_qos_attr_s *qos_attr)
{
	qos_attr->qos_priority = bfa_os_ntohl(rport->qos_attr.qos_priority);
	qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
}
void
bfa_rport_clear_stats(struct bfa_rport_s *rport)
{
	bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
}