/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <bfi/bfi_pport.h>
#include <cs/bfa_debug.h>
#include <aen/bfa_aen.h>
#include <cs/bfa_plog.h>
#include <aen/bfa_aen_port.h>

BFA_TRC_FILE(HAL, PPORT);
#define bfa_pport_callback(__pport, __event) do {			\
	if ((__pport)->bfa->fcs) {					\
		(__pport)->event_cbfn((__pport)->event_cbarg, (__event)); \
	} else {							\
		(__pport)->hcb_event = (__event);			\
		bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe,	\
			     __bfa_cb_port_event, (__pport));		\
	}								\
} while (0)
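/*
 * Delivery note: when the FCS is attached the event callback is invoked
 * synchronously; otherwise the event is recorded in hcb_event and delivered
 * later through the callback queue via __bfa_cb_port_event().
 */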
/*
 * The port is considered disabled if corresponding physical port or IOC are
 * disabled explicitly.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \
	 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
/*
 * forward declarations
 */
static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port);
static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port);
static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport);
static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport);
static void bfa_pport_set_wwns(struct bfa_pport_s *port);
static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete);
static void bfa_port_stats_timeout(void *cbarg);
static void bfa_port_stats_clr_timeout(void *cbarg);
/*
 * BFA port state machine events
 */
enum bfa_pport_sm_event {
	BFA_PPORT_SM_START = 1,		/* start port state machine */
	BFA_PPORT_SM_STOP = 2,		/* stop port state machine */
	BFA_PPORT_SM_ENABLE = 3,	/* enable port */
	BFA_PPORT_SM_DISABLE = 4,	/* disable port state machine */
	BFA_PPORT_SM_FWRSP = 5,		/* firmware enable/disable rsp */
	BFA_PPORT_SM_LINKUP = 6,	/* firmware linkup event */
	BFA_PPORT_SM_LINKDOWN = 7,	/* firmware linkdown event */
	BFA_PPORT_SM_QRESUME = 8,	/* CQ space available */
	BFA_PPORT_SM_HWFAIL = 9,	/* IOC h/w failure */
};
static void bfa_pport_sm_uninit(struct bfa_pport_s *pport,
				enum bfa_pport_sm_event event);
static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
					enum bfa_pport_sm_event event);
static void bfa_pport_sm_enabling(struct bfa_pport_s *pport,
				  enum bfa_pport_sm_event event);
static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport,
				  enum bfa_pport_sm_event event);
static void bfa_pport_sm_linkup(struct bfa_pport_s *pport,
				enum bfa_pport_sm_event event);
static void bfa_pport_sm_disabling(struct bfa_pport_s *pport,
				   enum bfa_pport_sm_event event);
static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
					 enum bfa_pport_sm_event event);
static void bfa_pport_sm_disabled(struct bfa_pport_s *pport,
				  enum bfa_pport_sm_event event);
static void bfa_pport_sm_stopped(struct bfa_pport_s *pport,
				 enum bfa_pport_sm_event event);
static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport,
				 enum bfa_pport_sm_event event);
static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport,
				 enum bfa_pport_sm_event event);
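
/*
 * hal_pport_sm_table maps each state handler function back to its
 * BFA_PPORT_ST_* value; bfa_sm_to_state() walks this table so that
 * bfa_pport_get_attr() and bfa_pport_is_disabled() can report the current
 * state without it being stored separately.
 */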
static struct bfa_sm_table_s hal_pport_sm_table[] = {
	{BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT},
	{BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING},
	{BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
	{BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP},
	{BFA_SM(bfa_pport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING},
	{BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED},
	{BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED},
	{BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
	{BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
};
static void
bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = pport->bfa->logm;
	wwn_t pwwn = pport->pwwn;
	char pwwn_ptr[BFA_STRING_32];
	struct bfa_ioc_attr_s ioc_attr;

	wwn2str(pwwn_ptr, pwwn);

	switch (event) {
	case BFA_PORT_AEN_ONLINE:
		bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_OFFLINE:
		bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_ENABLE:
		bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_DISABLE:
		bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_DISCONNECT:
		bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr);
		break;
	case BFA_PORT_AEN_QOS_NEG:
		bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr);
		break;
	default:
		break;
	}

	bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr);
	aen_data.port.ioc_type = ioc_attr.ioc_type;
	aen_data.port.pwwn = pwwn;
}
static void
bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
		break;

	case BFA_PPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
static void
bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
			    enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_QRESUME:
		bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		bfa_pport_send_enable(pport);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_reqq_wcancel(&pport->reqq_wait);
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		bfa_reqq_wcancel(&pport->reqq_wait);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_LINKUP:
	case BFA_PPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&pport->reqq_wait);
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
static void
bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_FWRSP:
	case BFA_PPORT_SM_LINKDOWN:
		bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
		break;

	case BFA_PPORT_SM_LINKUP:
		bfa_pport_update_linkinfo(pport);
		bfa_sm_set_state(pport, bfa_pport_sm_linkup);

		bfa_assert(pport->event_cbfn);
		bfa_pport_callback(pport, BFA_PPORT_LINKUP);
		break;

	case BFA_PPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
static void
bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_LINKUP:
		bfa_pport_update_linkinfo(pport);
		bfa_sm_set_state(pport, bfa_pport_sm_linkup);
		bfa_assert(pport->event_cbfn);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		bfa_pport_callback(pport, BFA_PPORT_LINKUP);
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE);

		/*
		 * If QoS is enabled and it is not online,
		 * send a separate event.
		 */
		if ((pport->cfg.qos_enabled)
		    && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG);

		break;

	case BFA_PPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_PPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
static void
bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);

		bfa_pport_reset_linkinfo(pport);
		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_LINKDOWN:
		bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
		bfa_pport_reset_linkinfo(pport);
		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		if (BFA_PORT_IS_DISABLED(pport->bfa))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		bfa_pport_reset_linkinfo(pport);
		if (BFA_PORT_IS_DISABLED(pport->bfa))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		bfa_pport_reset_linkinfo(pport);
		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
		if (BFA_PORT_IS_DISABLED(pport->bfa))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
static void
bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
			     enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_QRESUME:
		bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		bfa_pport_send_disable(pport);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		bfa_reqq_wcancel(&pport->reqq_wait);
		break;

	case BFA_PPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_PPORT_SM_LINKUP:
	case BFA_PPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
		bfa_reqq_wcancel(&pport->reqq_wait);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
static void
bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_FWRSP:
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		break;

	case BFA_PPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_PPORT_SM_ENABLE:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_LINKUP:
	case BFA_PPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
static void
bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_ENABLE:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_PPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}
static void
bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		break;
	}
}
/*
 * Port is enabled. IOC is down/failed.
 */
static void
bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		break;
	}
}
/*
 * Port is disabled. IOC is down/failed.
 */
static void
bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		break;

	case BFA_PPORT_SM_ENABLE:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		/*
		 * Ignore all other events.
		 */
		break;
	}
}
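
/*
 * Typical state flow (derived from the handlers above): a START or ENABLE
 * request moves the port from uninit/disabled to enabling (or enabling_qwait
 * while waiting for request-queue space); a firmware response or link event
 * then moves it to linkdown/linkup; DISABLE, STOP and HWFAIL events unwind it
 * through disabling, disabled, stopped or the IOC failure states.
 */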
static void
__bfa_cb_port_event(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_pport_s *pport = cbarg;

	if (complete)
		pport->event_cbfn(pport->event_cbarg, pport->hcb_event);
}

#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \
					BFA_CACHELINE_SZ))
static void
bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		  u32 *dm_len)
{
	*dm_len += PPORT_STATS_DMA_SZ;
}
static void
bfa_pport_qresume(void *cbarg)
{
	struct bfa_pport_s *port = cbarg;

	bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME);
}
static void
bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	pport->stats_kva = dm_kva;
	pport->stats_pa = dm_pa;
	pport->stats = (union bfa_pport_stats_u *)dm_kva;

	dm_kva += PPORT_STATS_DMA_SZ;
	dm_pa += PPORT_STATS_DMA_SZ;

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;
}
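
/*
 * bfa_pport_meminfo() reserves one PPORT_STATS_DMA_SZ slice in the DMA-able
 * memory segment; bfa_pport_mem_claim() carves that slice out for
 * pport->stats and advances the meminfo cursors.  The physical address
 * (stats_pa) is handed to firmware in the enable request so that port
 * statistics can be DMAed directly into this buffer.
 */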
/*
 * Memory initialization.
 */
static void
bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
	struct bfa_pport_cfg_s *port_cfg = &pport->cfg;

	bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s));
	pport->bfa = bfa;

	bfa_pport_mem_claim(pport, meminfo);

	bfa_sm_set_state(pport, bfa_pport_sm_uninit);

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PPORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;

	bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport);
}
static void
bfa_pport_initdone(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_pport_set_wwns(pport);
	if (pport->cfg.maxfrsize == 0)
		pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	bfa_assert(pport->cfg.maxfrsize);
	bfa_assert(pport->cfg.rx_bbcredit);
	bfa_assert(pport->speed_sup);
}
static void
bfa_pport_detach(struct bfa_s *bfa)
{
}

/*
 * Called when IOC is ready.
 */
static void
bfa_pport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START);
}

/*
 * Called before IOC is stopped.
 */
static void
bfa_pport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP);
}

/*
 * Called when IOC failure is detected.
 */
static void
bfa_pport_iocdisable(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL);
}
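
/*
 * The start/stop/iocdisable hooks above simply translate IOC lifecycle
 * transitions into port state machine events (BFA_PPORT_SM_START,
 * BFA_PPORT_SM_STOP and BFA_PPORT_SM_HWFAIL); all real work is done by the
 * state handlers.
 */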
static void
bfa_pport_update_linkinfo(struct bfa_pport_s *pport)
{
	struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event;

	pport->speed = pevent->link_state.speed;
	pport->topology = pevent->link_state.topology;

	if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP)
		pport->myalpa = pevent->link_state.tl.loop_info.myalpa;

	bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr);
	bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr);

	bfa_trc(pport->bfa, pport->speed);
	bfa_trc(pport->bfa, pport->topology);
}
static void
bfa_pport_reset_linkinfo(struct bfa_pport_s *pport)
{
	pport->speed = BFA_PPORT_SPEED_UNKNOWN;
	pport->topology = BFA_PPORT_TOPOLOGY_NONE;
}
/*
 * Send port enable message to firmware.
 */
static bfa_boolean_t
bfa_pport_send_enable(struct bfa_pport_s *port)
{
	struct bfi_pport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	port->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ,
		    bfa_lpuid(port->bfa));
	m->nwwn = port->nwwn;
	m->pwwn = port->pwwn;
	m->port_cfg = port->cfg;
	m->msgtag = port->msgtag;
	m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa);
	bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
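
/*
 * When the request queue is full, bfa_pport_send_enable()/_disable() park the
 * port on reqq_wait and return BFA_FALSE; the caller then sits in the
 * corresponding *_qwait state until bfa_pport_qresume() raises
 * BFA_PPORT_SM_QRESUME and the request is re-issued.
 */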
/*
 * Send port disable message to firmware.
 */
static bfa_boolean_t
bfa_pport_send_disable(struct bfa_pport_s *port)
{
	bfi_pport_disable_req_t *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	port->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ,
		    bfa_lpuid(port->bfa));
	m->msgtag = port->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}
static void
bfa_pport_set_wwns(struct bfa_pport_s *port)
{
	port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc);
	port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc);

	bfa_trc(port->bfa, port->pwwn);
	bfa_trc(port->bfa, port->nwwn);
}
static void
bfa_port_send_txcredit(void *port_cbarg)
{
	struct bfa_pport_s *port = port_cbarg;
	struct bfi_pport_set_svc_params_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(port->bfa, port->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ,
		    bfa_lpuid(port->bfa));
	m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
}
/*
 * Firmware message handler.
 */
void
bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
	union bfi_pport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	pport->event_arg.i2hmsg = i2hmsg;

	switch (msg->mhdr.msg_id) {
	case BFI_PPORT_I2H_ENABLE_RSP:
		if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
			bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
		break;

	case BFI_PPORT_I2H_DISABLE_RSP:
		if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
			bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
		break;

	case BFI_PPORT_I2H_EVENT:
		switch (i2hmsg.event->link_state.linkstate) {
		case BFA_PPORT_LINKUP:
			bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP);
			break;
		case BFA_PPORT_LINKDOWN:
			bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN);
			break;
		case BFA_PPORT_TRUNK_LINKDOWN:
			/* todo: event notification */
			break;
		}
		break;

	case BFI_PPORT_I2H_GET_STATS_RSP:
	case BFI_PPORT_I2H_GET_QOS_STATS_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (pport->stats_busy == BFA_FALSE
		    || pport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&pport->timer);
		pport->stats_status = i2hmsg.getstats_rsp->status;
		bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats,
			     pport);
		break;

	case BFI_PPORT_I2H_CLEAR_STATS_RSP:
	case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (pport->stats_busy == BFA_FALSE
		    || pport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&pport->timer);
		pport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(pport->bfa, &pport->hcb_qe,
			     __bfa_cb_port_stats_clr, pport);
		break;
	}
}
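
/*
 * Note on the stats responses above: if no stats request is outstanding or
 * the stats timer has already popped (stats_status == BFA_STATUS_ETIMER),
 * the firmware response is dropped; otherwise the timer is stopped and
 * completion is handed off through the callback queue.
 */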
/*
 * Registered callback for port events.
 */
void
bfa_pport_event_register(struct bfa_s *bfa,
			 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
			 void *cbarg)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	pport->event_cbfn = cbfn;
	pport->event_cbarg = cbarg;
}
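
/*
 * Usage sketch (hypothetical caller such as the FCS layer; names below are
 * illustrative only): register the event handler before enabling the port so
 * that LINKUP/LINKDOWN notifications are not missed:
 *
 *	bfa_pport_event_register(bfa, my_port_event_cb, my_drv);
 *	bfa_pport_cfg_speed(bfa, BFA_PPORT_SPEED_AUTO);
 *	bfa_pport_enable(bfa);
 */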
bfa_status_t
bfa_pport_enable(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	if (pport->diag_busy)
		return BFA_STATUS_DIAG_BUSY;
	else if (bfa_sm_cmp_state
		 (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait))
		return BFA_STATUS_DEVBUSY;

	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_pport_disable(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
/*
 * Configure port speed.
 */
bfa_status_t
bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, speed);

	if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) {
		bfa_trc(bfa, pport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	pport->cfg.speed = speed;

	return BFA_STATUS_OK;
}
/*
 * Get current speed.
 */
enum bfa_pport_speed
bfa_pport_get_speed(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->speed;
}
/*
 * Configure port topology.
 */
bfa_status_t
bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, topology);
	bfa_trc(bfa, pport->cfg.topology);

	switch (topology) {
	case BFA_PPORT_TOPOLOGY_P2P:
	case BFA_PPORT_TOPOLOGY_LOOP:
	case BFA_PPORT_TOPOLOGY_AUTO:
		break;

	default:
		return BFA_STATUS_EINVAL;
	}

	pport->cfg.topology = topology;
	return BFA_STATUS_OK;
}
/*
 * Get current topology.
 */
enum bfa_pport_topology
bfa_pport_get_topology(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->topology;
}
bfa_status_t
bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, pport->cfg.cfg_hardalpa);
	bfa_trc(bfa, pport->cfg.hardalpa);

	pport->cfg.cfg_hardalpa = BFA_TRUE;
	pport->cfg.hardalpa = alpa;

	return BFA_STATUS_OK;
}
bfa_status_t
bfa_pport_clr_hardalpa(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, pport->cfg.cfg_hardalpa);
	bfa_trc(bfa, pport->cfg.hardalpa);

	pport->cfg.cfg_hardalpa = BFA_FALSE;
	return BFA_STATUS_OK;
}
bfa_boolean_t
bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	*alpa = port->cfg.hardalpa;
	return port->cfg.cfg_hardalpa;
}
u8
bfa_pport_get_myalpa(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->myalpa;
}
bfa_status_t
bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, maxfrsize);
	bfa_trc(bfa, pport->cfg.maxfrsize);

	/*
	 * must be within range
	 */
	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
		return BFA_STATUS_INVLD_DFSZ;

	/*
	 * power of 2, if not the max frame size of 2112
	 */
	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
		return BFA_STATUS_INVLD_DFSZ;

	pport->cfg.maxfrsize = maxfrsize;
	return BFA_STATUS_OK;
}
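
/*
 * Accepted frame sizes are therefore FC_MAX_PDUSZ (2112) or any power of two
 * within [FC_MIN_PDUSZ, FC_MAX_PDUSZ], e.g. 512, 1024 or 2048.  The
 * (maxfrsize & (maxfrsize - 1)) test is non-zero exactly when more than one
 * bit is set, i.e. when the value is not a power of two.
 */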
u16
bfa_pport_get_maxfrsize(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->cfg.maxfrsize;
}
u32
bfa_pport_mypid(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->mypid;
}
u8
bfa_pport_get_rx_bbcredit(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->cfg.rx_bbcredit;
}
void
bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	port->cfg.tx_bbcredit = (u8) tx_bbcredit;
	bfa_port_send_txcredit(port);
}
/*
 * Get port attributes.
 */
wwn_t
bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	if (node)
		return pport->nwwn;
	else
		return pport->pwwn;
}
void
bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));

	attr->nwwn = pport->nwwn;
	attr->pwwn = pport->pwwn;

	bfa_os_memcpy(&attr->pport_cfg, &pport->cfg,
		      sizeof(struct bfa_pport_cfg_s));

	/*
	 * speed attributes
	 */
	attr->pport_cfg.speed = pport->cfg.speed;
	attr->speed_supported = pport->speed_sup;
	attr->speed = pport->speed;
	attr->cos_supported = FC_CLASS_3;

	/*
	 * topology attributes
	 */
	attr->pport_cfg.topology = pport->cfg.topology;
	attr->topology = pport->topology;

	/*
	 * beacon attributes
	 */
	attr->beacon = pport->beacon;
	attr->link_e2e_beacon = pport->link_e2e_beacon;
	attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm);
	if (bfa_ioc_is_disabled(&pport->bfa->ioc))
		attr->port_state = BFA_PPORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc))
		attr->port_state = BFA_PPORT_ST_FWMISMATCH;
}
static void
bfa_port_stats_query(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
	bfi_pport_get_stats_req_t *msg;

	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
	if (!msg) {
		port->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query,
			       port);
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
		return;
	}
	port->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t));
	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ,
		    bfa_lpuid(port->bfa));
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
}
static void
bfa_port_stats_clear(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
	bfi_pport_clear_stats_req_t *msg;

	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
	if (!msg) {
		port->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear,
			       port);
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
		return;
	}
	port->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t));
	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ,
		    bfa_lpuid(port->bfa));
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
}
static void
bfa_port_qos_stats_clear(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
	bfi_pport_clear_qos_stats_req_t *msg;

	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
	if (!msg) {
		port->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
			       port);
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
		return;
	}
	port->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t));
	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ,
		    bfa_lpuid(port->bfa));
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
}
static void
bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s)
{
	u32 *dip = (u32 *) d;
	u32 *sip = (u32 *) s;
	int i;

	/*
	 * Do 64 bit fields swap first
	 */
	for (i = 0;
	     i <
	     ((sizeof(union bfa_pport_stats_u) -
	       sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) {
#ifdef __BIGENDIAN
		dip[i] = bfa_os_ntohl(sip[i]);
		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
#else
		dip[i] = bfa_os_ntohl(sip[i + 1]);
		dip[i + 1] = bfa_os_ntohl(sip[i]);
#endif
	}

	/*
	 * Now swap the 32 bit fields
	 */
	for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i)
		dip[i] = bfa_os_ntohl(sip[i]);
}
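
/*
 * The firmware reports statistics in network (big-endian) byte order.  The
 * leading portion of the union holds 64-bit counters, so adjacent 32-bit
 * words are byte-swapped and, on little-endian hosts, exchanged pairwise;
 * the trailing QoS portion holds plain 32-bit counters and only needs the
 * per-word byte swap.
 */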
static void
__bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_pport_s *port = cbarg;

	if (complete) {
		port->stats_cbfn(port->stats_cbarg, port->stats_status);
	} else {
		port->stats_busy = BFA_FALSE;
		port->stats_status = BFA_STATUS_OK;
	}
}
static void
bfa_port_stats_clr_timeout(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;

	bfa_trc(port->bfa, port->stats_qfull);

	if (port->stats_qfull) {
		bfa_reqq_wcancel(&port->stats_reqq_wait);
		port->stats_qfull = BFA_FALSE;
	}

	port->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port);
}
static void
__bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_pport_s *port = cbarg;

	if (complete) {
		if (port->stats_status == BFA_STATUS_OK)
			bfa_pport_stats_swap(port->stats_ret, port->stats);
		port->stats_cbfn(port->stats_cbarg, port->stats_status);
	} else {
		port->stats_busy = BFA_FALSE;
		port->stats_status = BFA_STATUS_OK;
	}
}
static void
bfa_port_stats_timeout(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;

	bfa_trc(port->bfa, port->stats_qfull);

	if (port->stats_qfull) {
		bfa_reqq_wcancel(&port->stats_reqq_wait);
		port->stats_qfull = BFA_FALSE;
	}

	port->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port);
}
#define BFA_PORT_STATS_TOV	1000
/*
 * Fetch port statistics.
 */
bfa_status_t
bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
		    bfa_cb_pport_t cbfn, void *cbarg)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	if (port->stats_busy) {
		bfa_trc(bfa, port->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	port->stats_busy = BFA_TRUE;
	port->stats_ret = stats;
	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;

	bfa_port_stats_query(port);

	bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port,
			BFA_PORT_STATS_TOV);
	return BFA_STATUS_OK;
}
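
/*
 * Usage sketch (hypothetical caller; names below are illustrative only):
 * the call is asynchronous, so the caller supplies a buffer and a completion
 * callback and must not reuse the buffer until the callback reports status:
 *
 *	if (bfa_pport_get_stats(bfa, &my_stats, my_stats_cb, my_drv) !=
 *	    BFA_STATUS_OK)
 *		handle_busy();
 *
 * Completion arrives either from the firmware response or, after the
 * BFA_PORT_STATS_TOV timeout, from bfa_port_stats_timeout() with
 * BFA_STATUS_ETIMER.
 */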
bfa_status_t
bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	if (port->stats_busy) {
		bfa_trc(bfa, port->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	port->stats_busy = BFA_TRUE;
	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;

	bfa_port_stats_clear(port);

	bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
			BFA_PORT_STATS_TOV);
	return BFA_STATUS_OK;
}
bfa_status_t
bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, bitmap);
	bfa_trc(bfa, pport->cfg.trunked);
	bfa_trc(bfa, pport->cfg.trunk_ports);

	if (!bitmap || (bitmap & (bitmap - 1)))
		return BFA_STATUS_EINVAL;

	pport->cfg.trunked = BFA_TRUE;
	pport->cfg.trunk_ports = bitmap;

	return BFA_STATUS_OK;
}
void
bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	qos_attr->state = bfa_os_ntohl(pport->qos_attr.state);
	qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr);
}
void
bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
			  struct bfa_qos_vc_attr_s *qos_vc_attr)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr;
	u32 i = 0;

	qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
	qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
	qos_vc_attr->elp_opmode_flags =
		bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);

	/*
	 * Individual VC info
	 */
	while (i < qos_vc_attr->total_vc_count) {
		qos_vc_attr->vc_info[i].vc_credit =
			bfa_vc_attr->vc_info[i].vc_credit;
		qos_vc_attr->vc_info[i].borrow_credit =
			bfa_vc_attr->vc_info[i].borrow_credit;
		qos_vc_attr->vc_info[i].priority =
			bfa_vc_attr->vc_info[i].priority;
		++i;
	}
}
bfa_status_t
bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
			bfa_cb_pport_t cbfn, void *cbarg)
{
	/*
	 * QoS stats is embedded in port stats
	 */
	return bfa_pport_get_stats(bfa, stats, cbfn, cbarg);
}
bfa_status_t
bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	if (port->stats_busy) {
		bfa_trc(bfa, port->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	port->stats_busy = BFA_TRUE;
	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;

	bfa_port_qos_stats_clear(port);

	bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
			BFA_PORT_STATS_TOV);
	return BFA_STATUS_OK;
}
/*
 * Disable trunking.
 */
bfa_status_t
bfa_pport_trunk_disable(struct bfa_s *bfa)
{
	return BFA_STATUS_OK;
}
bfa_boolean_t
bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	*bitmap = port->cfg.trunk_ports;
	return port->cfg.trunked;
}
bfa_boolean_t
bfa_pport_is_disabled(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return (bfa_sm_to_state(hal_pport_sm_table, port->sm) ==
		BFA_PPORT_ST_DISABLED);
}
bfa_boolean_t
bfa_pport_is_ratelim(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
}
void
bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, on_off);
	bfa_trc(bfa, pport->cfg.qos_enabled);

	pport->cfg.qos_enabled = on_off;
}
void
bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, on_off);
	bfa_trc(bfa, pport->cfg.ratelimit);

	pport->cfg.ratelimit = on_off;
	if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
		pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
}
/*
 * Configure default minimum ratelim speed
 */
bfa_status_t
bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, speed);

	/*
	 * Auto and speeds greater than the supported speed are invalid.
	 */
	if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) {
		bfa_trc(bfa, pport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	pport->cfg.trl_def_speed = speed;

	return BFA_STATUS_OK;
}
/*
 * Get default minimum ratelim speed
 */
enum bfa_pport_speed
bfa_pport_get_ratelim_speed(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, pport->cfg.trl_def_speed);
	return pport->cfg.trl_def_speed;
}
void
bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, status);
	bfa_trc(bfa, pport->diag_busy);

	pport->diag_busy = status;
}
void
bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
		 bfa_boolean_t link_e2e_beacon)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, beacon);
	bfa_trc(bfa, link_e2e_beacon);
	bfa_trc(bfa, pport->beacon);
	bfa_trc(bfa, pport->link_e2e_beacon);

	pport->beacon = beacon;
	pport->link_e2e_beacon = link_e2e_beacon;
}
bfa_boolean_t
bfa_pport_is_linkup(struct bfa_s *bfa)
{
	return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup);
}