// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/platform_device.h>

#include <net/net_namespace.h>
#include <net/addrconf.h>
#include <net/genetlink.h>

#include "internal.h"
#include "ncsi-pkt.h"
#include "ncsi-netlink.h"

LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);

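/* Link state is cached in the NCSI_MODE_LINK mode data; bit 0 of the third
 * word mirrors the "link up" flag from the last Get Link Status (GLS)
 * response or link AEN seen on the channel.
 */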
bool ncsi_channel_has_link(struct ncsi_channel *channel)
{
	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
}

bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
			  struct ncsi_channel *channel)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_PACKAGE(ndp, np)
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (nc == channel)
				continue;
			if (nc->state == NCSI_CHANNEL_ACTIVE &&
			    ncsi_channel_has_link(nc))
				return false;
		}

	return true;
}

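/* Report the aggregated link state to the NCSI consumer via nd->handler().
 * With @force_down the link is reported as down regardless of the channel
 * states; otherwise the link is up if any active, idle channel has link.
 */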
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			if (ncsi_channel_has_link(nc)) {
				spin_unlock_irqrestore(&nc->lock, flags);
				nd->link_up = 1;
				goto report;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

report:
	nd->handler(nd);
}

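/* Per-channel monitor timer: while a channel is active, periodically send
 * Get Link Status (GLS) and advance monitor.state once per second.  The
 * GLS response handler rewinds the state to START; if the state climbs
 * past NCSI_CHANNEL_MONITOR_WAIT_MAX the channel is considered dead and
 * is queued for reconfiguration.
 */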
static void ncsi_channel_monitor(struct timer_list *t)
{
	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel_mode *ncm;
	struct ncsi_cmd_arg nca;
	bool enabled, chained;
	unsigned int monitor_state;
	unsigned long flags;
	int state, ret;

	spin_lock_irqsave(&nc->lock, flags);
	state = nc->state;
	chained = !list_empty(&nc->link);
	enabled = nc->monitor.enabled;
	monitor_state = nc->monitor.state;
	spin_unlock_irqrestore(&nc->lock, flags);

	if (!enabled)
		return;	/* expected race disabling timer */
	if (WARN_ON_ONCE(chained))
		goto bad_state;

	if (state != NCSI_CHANNEL_INACTIVE &&
	    state != NCSI_CHANNEL_ACTIVE) {
bad_state:
		netdev_warn(ndp->ndev.dev,
			    "Bad NCSI monitor state channel %d 0x%x %s queue\n",
			    nc->id, state, chained ? "on" : "off");
		spin_lock_irqsave(&nc->lock, flags);
		nc->monitor.enabled = false;
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}

	switch (monitor_state) {
	case NCSI_CHANNEL_MONITOR_START:
	case NCSI_CHANNEL_MONITOR_RETRY:
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.req_flags = 0;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
		break;
	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
		break;
	default:
		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
			   nc->id);
		ncsi_report_link(ndp, true);
		ndp->flags |= NCSI_DEV_RESHUFFLE;

		ncm = &nc->modes[NCSI_MODE_LINK];
		spin_lock_irqsave(&nc->lock, flags);
		nc->monitor.enabled = false;
		nc->state = NCSI_CHANNEL_INVISIBLE;
		ncm->data[2] &= ~0x1;
		spin_unlock_irqrestore(&nc->lock, flags);

		spin_lock_irqsave(&ndp->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	spin_lock_irqsave(&nc->lock, flags);
	nc->monitor.state++;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	WARN_ON_ONCE(nc->monitor.enabled);
	nc->monitor.enabled = true;
	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
	spin_unlock_irqrestore(&nc->lock, flags);

	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	if (!nc->monitor.enabled) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}
	nc->monitor.enabled = false;
	spin_unlock_irqrestore(&nc->lock, flags);

	del_timer_sync(&nc->monitor.timer);
}

struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
				       unsigned char id)
{
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_CHANNEL(np, nc) {
		if (nc->id == id)
			return nc;
	}

	return NULL;
}

struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
	struct ncsi_channel *nc, *tmp;
	int index;
	unsigned long flags;

	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
	if (!nc)
		return NULL;

	nc->id = id;
	nc->package = np;
	nc->state = NCSI_CHANNEL_INACTIVE;
	nc->monitor.enabled = false;
	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
	spin_lock_init(&nc->lock);
	INIT_LIST_HEAD(&nc->link);
	for (index = 0; index < NCSI_CAP_MAX; index++)
		nc->caps[index].index = index;
	for (index = 0; index < NCSI_MODE_MAX; index++)
		nc->modes[index].index = index;

	spin_lock_irqsave(&np->lock, flags);
	tmp = ncsi_find_channel(np, id);
	if (tmp) {
		spin_unlock_irqrestore(&np->lock, flags);
		kfree(nc);
		return tmp;
	}

	list_add_tail_rcu(&nc->node, &np->channels);
	np->channel_num++;
	spin_unlock_irqrestore(&np->lock, flags);

	return nc;
}

static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);

	/* Release filters */
	kfree(nc->mac_filter.addrs);
	kfree(nc->vlan_filter.vids);

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}

struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
				       unsigned char id)
{
	struct ncsi_package *np;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (np->id == id)
			return np;
	}

	return NULL;
}

struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
				      unsigned char id)
{
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	np = kzalloc(sizeof(*np), GFP_ATOMIC);
	if (!np)
		return NULL;

	np->id = id;
	np->ndp = ndp;
	spin_lock_init(&np->lock);
	INIT_LIST_HEAD(&np->channels);
	np->channel_whitelist = UINT_MAX;

	spin_lock_irqsave(&ndp->lock, flags);
	tmp = ncsi_find_package(ndp, id);
	if (tmp) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		kfree(np);
		return tmp;
	}

	list_add_tail_rcu(&np->node, &ndp->packages);
	ndp->package_num++;
	spin_unlock_irqrestore(&ndp->lock, flags);

	return np;
}

void ncsi_remove_package(struct ncsi_package *np)
{
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel *nc, *tmp;
	unsigned long flags;

	/* Release all child channels */
	list_for_each_entry_safe(nc, tmp, &np->channels, node)
		ncsi_remove_channel(nc);

	/* Remove and free package */
	spin_lock_irqsave(&ndp->lock, flags);
	list_del_rcu(&np->node);
	ndp->package_num--;
	spin_unlock_irqrestore(&ndp->lock, flags);

	kfree(np);
}

void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
				   unsigned char id,
				   struct ncsi_package **np,
				   struct ncsi_channel **nc)
{
	struct ncsi_package *p;
	struct ncsi_channel *c;

	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;

	if (np)
		*np = p;
	if (nc)
		*nc = c;
}

/* For two consecutive NCSI commands, the packet IDs shouldn't be the
 * same.  Otherwise a stale response could be matched against the wrong
 * request.  So the available IDs are allocated in round-robin fashion.
 */
struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
					unsigned int req_flags)
{
	struct ncsi_request *nr = NULL;
	int i, limit = ARRAY_SIZE(ndp->requests);
	unsigned long flags;

	/* Check if there is one available request until the ceiling */
	spin_lock_irqsave(&ndp->lock, flags);
	for (i = ndp->request_id; i < limit; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

	/* Fall back to checking from the starting cursor */
	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

found:
	spin_unlock_irqrestore(&ndp->lock, flags);
	return nr;
}

void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}

struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
{
	struct ncsi_dev_priv *ndp;

	NCSI_FOR_EACH_DEV(ndp) {
		if (ndp->ndev.dev == dev)
			return &ndp->ndev;
	}

	return NULL;
}

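/* Expire an outstanding request that never received a response: leave it
 * to the response handler if one already arrived, notify a netlink
 * originator of the timeout, then release the request slot.
 */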
static void ncsi_request_timeout(struct timer_list *t)
{
	struct ncsi_request *nr = from_timer(nr, t, timer);
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct ncsi_cmd_pkt *cmd;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	/* If the request already has an associated response,
	 * let the response handler release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
		if (nr->cmd) {
			/* Find the package */
			cmd = (struct ncsi_cmd_pkt *)
			      skb_network_header(nr->cmd);
			ncsi_find_package_and_channel(ndp,
						      cmd->cmd.common.channel,
						      &np, &nc);
			ncsi_send_netlink_timeout(nr, np, nc);
		}
	}

	/* Release the request */
	ncsi_free_request(nr);
}

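/* Suspend state machine for the active channel: Select Package (SP),
 * optionally Get Link Status (GLS) on every channel when a reshuffle is
 * pending, then Disable Channel Network Tx (DCNT), Disable Channel (DC)
 * and finally Deselect Package (DP) unless another channel in the package
 * is still active.
 */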
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc, *tmp;
	struct ncsi_cmd_arg nca;
	unsigned long flags;
	int ret;

	np = ndp->active_package;
	nc = ndp->active_channel;
	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
		fallthrough;
	case ncsi_dev_state_suspend_select:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;

		/* Retrieve the last link states of the channels in the
		 * current package when the active channel needs to fail
		 * over to another one.  We may select a different channel
		 * as the next active one, and link state is the most
		 * important factor in that selection, so we need accurate
		 * link states.  Unfortunately, the link states of inactive
		 * channels are not updated by LSC AENs in time.
		 */
		if (ndp->flags & NCSI_DEV_RESHUFFLE)
			nd->state = ncsi_dev_state_suspend_gls;
		else
			nd->state = ncsi_dev_state_suspend_dcnt;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_gls:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_GLS;
		nca.package = np->id;
		nca.channel = ndp->channel_probe_id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		ndp->channel_probe_id++;

		if (ndp->channel_probe_id == ndp->channel_count) {
			ndp->channel_probe_id = 0;
			nd->state = ncsi_dev_state_suspend_dcnt;
		}

		break;
	case ncsi_dev_state_suspend_dcnt:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DCNT;
		nca.package = np->id;
		nca.channel = nc->id;

		nd->state = ncsi_dev_state_suspend_dc;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_dc:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DC;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.bytes[0] = 1;

		nd->state = ncsi_dev_state_suspend_deselect;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		NCSI_FOR_EACH_CHANNEL(np, tmp) {
			/* If there is another channel active on this package
			 * do not deselect the package.
			 */
			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
				nd->state = ncsi_dev_state_suspend_done;
				break;
			}
		}
		break;
	case ncsi_dev_state_suspend_deselect:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;

		nd->state = ncsi_dev_state_suspend_done;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_done:
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INACTIVE;
		spin_unlock_irqrestore(&nc->lock, flags);
		if (ndp->flags & NCSI_DEV_RESET)
			ncsi_reset_dev(nd);
		else
			ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}

	return;
error:
	nd->state = ncsi_dev_state_functional;
}

/* Check the VLAN filter bitmap for a set filter, and construct a
 * "Set VLAN Filter - Disable" packet if found.
 */
static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
			 struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	unsigned long flags;
	void *bitmap;
	int index;
	u16 vid;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);
	index = find_first_bit(bitmap, ncf->n_vids);
	if (index >= ncf->n_vids) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}
	vid = ncf->vids[index];

	clear_bit(index, bitmap);
	ncf->vids[index] = 0;
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = ntohs(vid);
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x00;

	return 0;
}

/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 * packet.
 */
static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
		       struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	struct vlan_vid *vlan = NULL;
	unsigned long flags;
	int i, index;
	void *bitmap;
	u16 vid;

	if (list_empty(&ndp->vlan_vids))
		return -1;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);

	rcu_read_lock();
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		vid = vlan->vid;
		for (i = 0; i < ncf->n_vids; i++)
			if (ncf->vids[i] == vid) {
				vid = 0;
				break;
			}
		if (vid)
			break;
	}
	rcu_read_unlock();

	if (!vid) {
		/* No VLAN ID pending to be set */
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	index = find_first_zero_bit(bitmap, ncf->n_vids);
	if (index < 0 || index >= ncf->n_vids) {
		netdev_err(ndp->ndev.dev,
			   "Channel %u already has all VLAN filters set\n",
			   nc->id);
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	ncf->vids[index] = vid;
	set_bit(index, bitmap);
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = ntohs(vid);
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x01;

	return 0;
}

static int ncsi_oem_keep_phy_intel(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN;

	memset(data, 0, NCSI_OEM_INTEL_CMD_KEEP_PHY_LEN);
	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);

	data[4] = NCSI_OEM_INTEL_CMD_KEEP_PHY;

	/* PHY Link up attribute */
	data[6] = 0x1;

	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

/* NCSI OEM Command APIs */
static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;

	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_BCM_ID);
	data[5] = NCSI_OEM_BCM_CMD_GMA;

	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
{
	union {
		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
	} u;
	int ret = 0;

	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;

	memset(&u, 0, sizeof(u));
	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;

	nca->data = u.data_u8;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

static int ncsi_oem_smaf_mlx(struct ncsi_cmd_arg *nca)
{
	union {
		u8 data_u8[NCSI_OEM_MLX_CMD_SMAF_LEN];
		u32 data_u32[NCSI_OEM_MLX_CMD_SMAF_LEN / sizeof(u32)];
	} u;
	int ret = 0;

	memset(&u, 0, sizeof(u));
	u.data_u32[0] = ntohl((__force __be32)NCSI_OEM_MFR_MLX_ID);
	u.data_u8[5] = NCSI_OEM_MLX_CMD_SMAF;
	u.data_u8[6] = NCSI_OEM_MLX_CMD_SMAF_PARAM;
	memcpy(&u.data_u8[MLX_SMAF_MAC_ADDR_OFFSET],
	       nca->ndp->ndev.dev->dev_addr, ETH_ALEN);
	u.data_u8[MLX_SMAF_MED_SUPPORT_OFFSET] =
		(MLX_MC_RBT_AVL | MLX_MC_RBT_SUPPORT);

	nca->payload = NCSI_OEM_MLX_CMD_SMAF_LEN;
	nca->data = u.data_u8;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during probe\n",
			   nca->type);
	return ret;
}

static int ncsi_oem_gma_handler_intel(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_INTEL_CMD_GMA_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_INTEL_CMD_GMA_LEN;

	memset(data, 0, NCSI_OEM_INTEL_CMD_GMA_LEN);
	*(unsigned int *)data = ntohl((__force __be32)NCSI_OEM_MFR_INTEL_ID);
	data[4] = NCSI_OEM_INTEL_CMD_GMA;

	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);

	return ret;
}

/* OEM Command handlers initialization */
static struct ncsi_oem_gma_handler {
	unsigned int	mfr_id;
	int		(*handler)(struct ncsi_cmd_arg *nca);
} ncsi_oem_gma_handlers[] = {
	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx },
	{ NCSI_OEM_MFR_INTEL_ID, ncsi_oem_gma_handler_intel }
};

static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
{
	struct ncsi_oem_gma_handler *nch = NULL;
	int i;

	/* This function should only be called once, return if flag set */
	if (nca->ndp->gma_flag == 1)
		return -1;

	/* Find gma handler for given manufacturer id */
	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
			if (ncsi_oem_gma_handlers[i].handler)
				nch = &ncsi_oem_gma_handlers[i];
			break;
		}
	}

	if (!nch) {
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
			   mf_id);
		return -1;
	}

	/* Get Mac address from NCSI device */
	return nch->handler(nca);
}

/* Determine if a given channel from the channel_queue should be used for Tx */
static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
			       struct ncsi_channel *nc)
{
	struct ncsi_channel_mode *ncm;
	struct ncsi_channel *channel;
	struct ncsi_package *np;

	/* Check if any other channel has Tx enabled; a channel may have
	 * already been configured and removed from the channel queue.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!ndp->multi_package && np != nc->package)
			continue;
		NCSI_FOR_EACH_CHANNEL(np, channel) {
			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
			if (ncm->enable)
				return false;
		}
	}

	/* This channel is the preferred channel and has link */
	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
		np = channel->package;
		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			return np->preferred_channel == nc;
		}
	}

	/* This channel has link */
	if (ncsi_channel_has_link(nc))
		return true;

	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
		if (ncsi_channel_has_link(channel))
			return false;

	/* No other channel has link; default to this one */
	return true;
}

/* Change the active Tx channel in a multi-channel setup */
int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
			   struct ncsi_package *package,
			   struct ncsi_channel *disable,
			   struct ncsi_channel *enable)
{
	struct ncsi_cmd_arg nca;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	int ret = 0;

	if (!package->multi_channel && !ndp->multi_package)
		netdev_warn(ndp->ndev.dev,
			    "NCSI: Trying to update Tx channel in single-channel mode\n");
	nca.ndp = ndp;
	nca.req_flags = 0;

	/* Find current channel with Tx enabled */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (disable)
			break;
		if (!ndp->multi_package && np != package)
			continue;

		NCSI_FOR_EACH_CHANNEL(np, nc)
			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
				disable = nc;
				break;
			}
	}

	/* Find a suitable channel for Tx */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (enable)
			break;
		if (!ndp->multi_package && np != package)
			continue;
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;

		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			enable = np->preferred_channel;
			break;
		}

		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & 0x1 << nc->id))
				continue;
			if (nc->state != NCSI_CHANNEL_ACTIVE)
				continue;
			if (ncsi_channel_has_link(nc)) {
				enable = nc;
				break;
			}
		}
	}

	if (disable == enable)
		return -1;

	if (!enable)
		return -1;

	if (disable) {
		nca.channel = disable->id;
		nca.package = disable->package->id;
		nca.type = NCSI_PKT_CMD_DCNT;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev,
				   "Error %d sending DCNT\n",
				   ret);
	}

	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);

	nca.channel = enable->id;
	nca.package = enable->package->id;
	nca.type = NCSI_PKT_CMD_ECNT;
	ret = ncsi_xmit_cmd(&nca);
	if (ret)
		netdev_err(ndp->ndev.dev,
			   "Error %d sending ECNT\n",
			   ret);

	return ret;
}

static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_channel *hot_nc = NULL;
	struct ncsi_dev *nd = &ndp->ndev;
	struct net_device *dev = nd->dev;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_config:
	case ncsi_dev_state_config_sp:
		ndp->pending_req_num = 1;

		/* Select the specific package */
		nca.type = NCSI_PKT_CMD_SP;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_SP\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_cis;
		break;
	case ncsi_dev_state_config_cis:
		ndp->pending_req_num = 1;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = np->id;
		nca.channel = nc->id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_CIS\n");
			goto error;
		}

		nd->state = IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
			  ? ncsi_dev_state_config_oem_gma
			  : ncsi_dev_state_config_clear_vids;
		break;
	case ncsi_dev_state_config_oem_gma:
		nd->state = ncsi_dev_state_config_clear_vids;

		nca.package = np->id;
		nca.channel = nc->id;
		ndp->pending_req_num = 1;
		if (nc->version.major >= 1 && nc->version.minor >= 2) {
			nca.type = NCSI_PKT_CMD_GMCMA;
			ret = ncsi_xmit_cmd(&nca);
		} else {
			nca.type = NCSI_PKT_CMD_OEM;
			ret = ncsi_gma_handler(&nca, nc->version.mf_id);
		}
		if (ret < 0)
			schedule_work(&ndp->work);

		break;
	case ncsi_dev_state_config_clear_vids:
	case ncsi_dev_state_config_svf:
	case ncsi_dev_state_config_ev:
	case ncsi_dev_state_config_sma:
	case ncsi_dev_state_config_ebf:
	case ncsi_dev_state_config_dgmf:
	case ncsi_dev_state_config_ecnt:
	case ncsi_dev_state_config_ec:
	case ncsi_dev_state_config_ae:
	case ncsi_dev_state_config_gls:
		ndp->pending_req_num = 1;

		nca.package = np->id;
		nca.channel = nc->id;

		/* Clear any active filters on the channel before setting */
		if (nd->state == ncsi_dev_state_config_clear_vids) {
			ret = clear_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_svf;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_clear_vids;
		/* Add known VLAN tags to the filter */
		} else if (nd->state == ncsi_dev_state_config_svf) {
			ret = set_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_ev;
				schedule_work(&ndp->work);
				break;
			}
			/* Repeat */
			nd->state = ncsi_dev_state_config_svf;
		/* Enable/Disable the VLAN filter */
		} else if (nd->state == ncsi_dev_state_config_ev) {
			if (list_empty(&ndp->vlan_vids)) {
				nca.type = NCSI_PKT_CMD_DV;
			} else {
				nca.type = NCSI_PKT_CMD_EV;
				nca.bytes[3] = NCSI_CAP_VLAN_NO;
			}
			nd->state = ncsi_dev_state_config_sma;
		} else if (nd->state == ncsi_dev_state_config_sma) {
			/* Use first entry in unicast filter table. Note that
			 * the MAC filter table starts from entry 1 instead of
			 * 0.
			 */
			nca.type = NCSI_PKT_CMD_SMA;
			for (index = 0; index < 6; index++)
				nca.bytes[index] = dev->dev_addr[index];
			nca.bytes[index] = 0x1;
			nd->state = ncsi_dev_state_config_ebf;
		} else if (nd->state == ncsi_dev_state_config_ebf) {
			nca.type = NCSI_PKT_CMD_EBF;
			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
			/* if multicast global filtering is supported then
			 * disable it so that all multicast packet will be
			 * forwarded to management controller
			 */
			if (nc->caps[NCSI_CAP_GENERIC].cap &
			    NCSI_CAP_GENERIC_MC)
				nd->state = ncsi_dev_state_config_dgmf;
			else if (ncsi_channel_is_tx(ndp, nc))
				nd->state = ncsi_dev_state_config_ecnt;
			else
				nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_dgmf) {
			nca.type = NCSI_PKT_CMD_DGMF;
			if (ncsi_channel_is_tx(ndp, nc))
				nd->state = ncsi_dev_state_config_ecnt;
			else
				nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ecnt) {
			if (np->preferred_channel &&
			    nc != np->preferred_channel)
				netdev_info(ndp->ndev.dev,
					    "NCSI: Tx failed over to channel %u\n",
					    nc->id);
			nca.type = NCSI_PKT_CMD_ECNT;
			nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ec) {
			/* Enable AEN if it's supported */
			nca.type = NCSI_PKT_CMD_EC;
			nd->state = ncsi_dev_state_config_ae;
			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
				nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_ae) {
			nca.type = NCSI_PKT_CMD_AE;
			nca.bytes[0] = 0;
			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
			nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_gls) {
			nca.type = NCSI_PKT_CMD_GLS;
			nd->state = ncsi_dev_state_config_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD %x\n",
				   nca.type);
			goto error;
		}
		break;
	case ncsi_dev_state_config_done:
		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
			   nc->id);
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;

		if (ndp->flags & NCSI_DEV_RESET) {
			/* A reset event happened during config, start it now */
			nc->reconfigure_needed = false;
			spin_unlock_irqrestore(&nc->lock, flags);
			ncsi_reset_dev(nd);
			break;
		}

		if (nc->reconfigure_needed) {
			/* This channel's configuration has been updated
			 * part-way during the config state - start the
			 * channel configuration over
			 */
			nc->reconfigure_needed = false;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
			ncsi_process_next_channel(ndp);
			break;
		}

		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
			hot_nc = nc;
		} else {
			hot_nc = NULL;
			netdev_dbg(ndp->ndev.dev,
				   "NCSI: channel %u link down after config\n",
				   nc->id);
		}
		spin_unlock_irqrestore(&nc->lock, flags);

		/* Update the hot channel */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->hot_channel = hot_nc;
		spin_unlock_irqrestore(&ndp->lock, flags);

		ncsi_start_channel_monitor(nc);
		ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
			     nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}

static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc, *found, *hot_nc;
	struct ncsi_channel_mode *ncm;
	unsigned long flags, cflags;
	struct ncsi_package *np;
	bool with_link;

	spin_lock_irqsave(&ndp->lock, flags);
	hot_nc = ndp->hot_channel;
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* By default the search is done once an inactive channel with up
	 * link is found, unless a preferred channel is set.
	 * If multi_package or multi_channel are configured all channels in the
	 * whitelist are added to the channel queue.
	 */
	found = NULL;
	with_link = false;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & (0x1 << nc->id)))
				continue;

			spin_lock_irqsave(&nc->lock, cflags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE) {
				spin_unlock_irqrestore(&nc->lock, cflags);
				continue;
			}

			if (!found)
				found = nc;

			if (nc == hot_nc)
				found = nc;

			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				found = nc;
				with_link = true;
			}

			/* If multi_channel is enabled configure all valid
			 * channels whether or not they currently have link
			 * so they will have AENs enabled.
			 */
			if (with_link || np->multi_channel) {
				spin_lock_irqsave(&ndp->lock, flags);
				list_add_tail_rcu(&nc->link,
						  &ndp->channel_queue);
				spin_unlock_irqrestore(&ndp->lock, flags);

				netdev_dbg(ndp->ndev.dev,
					   "NCSI: Channel %u added to queue (link %s)\n",
					   nc->id,
					   ncm->data[2] & 0x1 ? "up" : "down");
			}

			spin_unlock_irqrestore(&nc->lock, cflags);

			if (with_link && !np->multi_channel)
				break;
		}
		if (with_link && !ndp->multi_package)
			break;
	}

	if (list_empty(&ndp->channel_queue) && found) {
		netdev_info(ndp->ndev.dev,
			    "NCSI: No channel with link found, configuring channel %u\n",
			    found->id);
		spin_lock_irqsave(&ndp->lock, flags);
		list_add_tail_rcu(&found->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
	} else if (!found) {
		netdev_warn(ndp->ndev.dev,
			    "NCSI: No channel found to configure!\n");
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

	return ncsi_process_next_channel(ndp);
}

static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned int cap;
	bool has_channel = false;

	/* Hardware arbitration is disabled if any one channel
	 * doesn't explicitly support it.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			has_channel = true;

			cap = nc->caps[NCSI_CAP_GENERIC].cap;
			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
				ndp->flags &= ~NCSI_DEV_HWA;
				return false;
			}
		}
	}

	if (has_channel) {
		ndp->flags |= NCSI_DEV_HWA;
		return true;
	}

	ndp->flags &= ~NCSI_DEV_HWA;
	return false;
}

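/* Probe state machine: deselect all packages, then select each package in
 * turn and walk its channels with CIS/GVI/GC/GLS to populate the topology
 * before deselecting it again.  Once every package has been probed,
 * NCSI_DEV_PROBED is set and an active channel is chosen.
 */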
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
		fallthrough;
	case ncsi_dev_state_probe_deselect:
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		ndp->active_package = ncsi_find_package(ndp,
							ndp->package_probe_id);
		if (!ndp->active_package) {
			/* No response from the package */
			nd->state = ncsi_dev_state_probe_dp;
			schedule_work(&ndp->work);
			break;
		}
		nd->state = ncsi_dev_state_probe_cis;
		if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC) &&
		    ndp->mlx_multi_host)
			nd->state = ncsi_dev_state_probe_mlx_gma;

		schedule_work(&ndp->work);
		break;
	case ncsi_dev_state_probe_mlx_gma:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_gma_handler_mlx(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_mlx_smaf;
		break;
	case ncsi_dev_state_probe_mlx_smaf:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_smaf_mlx(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_cis;
		break;
	case ncsi_dev_state_probe_keep_phy:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = ndp->active_package->id;
		nca.channel = 0;
		ret = ncsi_oem_keep_phy_intel(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_cis:
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = 1;

		/* Clear initial state, then retrieve version, capability
		 * or link status.
		 */
		if (nd->state == ncsi_dev_state_probe_cis)
			nca.type = NCSI_PKT_CMD_CIS;
		else if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		nca.channel = ndp->channel_probe_id;

		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		if (nd->state == ncsi_dev_state_probe_cis) {
			nd->state = ncsi_dev_state_probe_gvi;
			if (IS_ENABLED(CONFIG_NCSI_OEM_CMD_KEEP_PHY) && ndp->channel_probe_id == 0)
				nd->state = ncsi_dev_state_probe_keep_phy;
		} else if (nd->state == ncsi_dev_state_probe_gvi) {
			nd->state = ncsi_dev_state_probe_gc;
		} else if (nd->state == ncsi_dev_state_probe_gc) {
			nd->state = ncsi_dev_state_probe_gls;
		} else {
			nd->state = ncsi_dev_state_probe_cis;
			ndp->channel_probe_id++;
		}

		if (ndp->channel_probe_id == ndp->channel_count) {
			ndp->channel_probe_id = 0;
			nd->state = ncsi_dev_state_probe_dp;
		}
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the current package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Probe next package */
		ndp->package_probe_id++;
		if (ndp->package_probe_id >= 8) {
			/* Probe finished */
			ndp->flags |= NCSI_DEV_PROBED;
			break;
		}
		nd->state = ncsi_dev_state_probe_package;
		ndp->active_package = NULL;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
			    nd->state);
	}

	if (ndp->flags & NCSI_DEV_PROBED) {
		/* Check if all packages have HWA support */
		ncsi_check_hwa(ndp);
		ncsi_choose_active_channel(ndp);
	}

	return;
error:
	netdev_err(ndp->ndev.dev,
		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
		   nca.type);
	ncsi_report_link(ndp, true);
}

static void ncsi_dev_work(struct work_struct *work)
{
	struct ncsi_dev_priv *ndp = container_of(work,
			struct ncsi_dev_priv, work);
	struct ncsi_dev *nd = &ndp->ndev;

	switch (nd->state & ncsi_dev_state_major) {
	case ncsi_dev_state_probe:
		ncsi_probe_channel(ndp);
		break;
	case ncsi_dev_state_suspend:
		ncsi_suspend_channel(ndp);
		break;
	case ncsi_dev_state_config:
		ncsi_configure_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
			    nd->state);
	}
}

int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	list_del_init(&nc->link);
	spin_unlock_irqrestore(&ndp->lock, flags);

	spin_lock_irqsave(&nc->lock, flags);
	old_state = nc->state;
	nc->state = NCSI_CHANNEL_INVISIBLE;
	spin_unlock_irqrestore(&nc->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
			   nc->id);
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
			   nc->id);
		ncsi_suspend_channel(ndp);
		break;
	default:
		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
			   old_state, nc->package->id, nc->id);
		ncsi_report_link(ndp, false);
		return -EINVAL;
	}

	return 0;

out:
	ndp->active_channel = NULL;
	ndp->active_package = NULL;
	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
		return ncsi_choose_active_channel(ndp);
	}

	ncsi_report_link(ndp, false);
	return -ENODEV;
}

static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	unsigned long flags;
	unsigned int n = 0;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Channels may be busy, mark dirty instead of
			 * kicking if;
			 * a) not ACTIVE (configured)
			 * b) in the channel_queue (to be configured)
			 * c) its ndev is in the config state
			 */
			if (nc->state != NCSI_CHANNEL_ACTIVE) {
				if ((ndp->ndev.state & 0xff00) ==
						ncsi_dev_state_config ||
						!list_empty(&nc->link)) {
					netdev_dbg(nd->dev,
						   "NCSI: channel %p marked dirty\n",
						   nc);
					nc->reconfigure_needed = true;
				}
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			spin_unlock_irqrestore(&nc->lock, flags);

			ncsi_stop_channel_monitor(nc);
			spin_lock_irqsave(&nc->lock, flags);
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
			n++;
		}
	}

	return n;
}

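/* ndo_vlan_rx_add_vid() handler: remember the VLAN ID and kick the
 * channels so the updated filter set is programmed into the NIC.
 */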
int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ncsi_dev_priv *ndp;
	unsigned int n_vids = 0;
	struct vlan_vid *vlan;
	struct ncsi_dev *nd;
	bool found = false;

	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: No net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Add the VLAN id to our internal list */
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		n_vids++;
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u already registered\n",
				   vid);
			return 0;
		}
	}
	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
		netdev_warn(dev,
			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
			    vid, NCSI_MAX_VLAN_VIDS);
		return -ENOSPC;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->proto = proto;
	vlan->vid = vid;
	list_add_rcu(&vlan->list, &ndp->vlan_vids);

	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);

	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);

int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_vid *vlan, *tmp;
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	bool found = false;

	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: no net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Remove the VLAN id from our internal list */
	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
			list_del_rcu(&vlan->list);
			found = true;
			kfree(vlan);
		}

	if (!found) {
		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
		return -EINVAL;
	}

	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);

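/* Allocate and initialize the per-netdev NCSI state, register the
 * ETH_P_NCSI packet handler and pick up the Mellanox multi-host DT
 * property from the parent platform device, if any.
 */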
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	struct platform_device *pdev;
	struct device_node *np;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_LIST_HEAD(&ndp->vlan_vids);
	INIT_WORK(&ndp->work, ncsi_dev_work);
	ndp->package_whitelist = UINT_MAX;

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = NCSI_REQ_START_IDX;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
	}
	ndp->channel_count = NCSI_RESERVED_CHANNEL;

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	pdev = to_platform_device(dev->dev.parent);
	if (pdev) {
		np = pdev->dev.of_node;
		if (np && (of_property_read_bool(np, "mellanox,multi-host") ||
			   of_property_read_bool(np, "mlx,multi-host")))
			ndp->mlx_multi_host = true;
	}

	return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);

int ncsi_start_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);

	if (nd->state != ncsi_dev_state_registered &&
	    nd->state != ncsi_dev_state_functional)
		return -ENOTTY;

	if (!(ndp->flags & NCSI_DEV_PROBED)) {
		ndp->package_probe_id = 0;
		ndp->channel_probe_id = 0;
		nd->state = ncsi_dev_state_probe;
		schedule_work(&ndp->work);
		return 0;
	}

	return ncsi_reset_dev(nd);
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);

void ncsi_stop_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	bool chained;
	int old_state;
	unsigned long flags;

	/* Stop the channel monitor on any active channels. Don't reset the
	 * channel state so we know which were active when ncsi_start_dev()
	 * is next called.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			ncsi_stop_channel_monitor(nc);

			spin_lock_irqsave(&nc->lock, flags);
			chained = !list_empty(&nc->link);
			old_state = nc->state;
			spin_unlock_irqrestore(&nc->lock, flags);

			WARN_ON_ONCE(chained ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
	ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);

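/* Reset the NCSI device: suspend whatever channel is currently active and
 * then re-run channel selection.  NCSI_DEV_RESET makes an in-flight
 * suspend or configuration finish first and re-enter the reset path.
 */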
int ncsi_reset_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_channel *nc, *active, *tmp;
	struct ncsi_package *np;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);

	if (!(ndp->flags & NCSI_DEV_RESET)) {
		/* Haven't been called yet, check states */
		switch (nd->state & ncsi_dev_state_major) {
		case ncsi_dev_state_registered:
		case ncsi_dev_state_probe:
			/* Not even probed yet - do nothing */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		case ncsi_dev_state_suspend:
		case ncsi_dev_state_config:
			/* Wait for the channel to finish its suspend/config
			 * operation; once it finishes it will check for
			 * NCSI_DEV_RESET and reset the state.
			 */
			ndp->flags |= NCSI_DEV_RESET;
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	} else {
		switch (nd->state) {
		case ncsi_dev_state_suspend_done:
		case ncsi_dev_state_config_done:
		case ncsi_dev_state_functional:
			/* Ok */
			break;
		default:
			/* Current reset operation happening */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	}

	if (!list_empty(&ndp->channel_queue)) {
		/* Clear any channel queue we may have interrupted */
		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
			list_del_init(&nc->link);
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	active = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (nc->state == NCSI_CHANNEL_ACTIVE) {
				active = nc;
				nc->state = NCSI_CHANNEL_INVISIBLE;
				spin_unlock_irqrestore(&nc->lock, flags);
				ncsi_stop_channel_monitor(nc);
				break;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}

		if (active)
			break;
	}

	if (!active) {
		/* Done */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->flags &= ~NCSI_DEV_RESET;
		spin_unlock_irqrestore(&ndp->lock, flags);
		return ncsi_choose_active_channel(ndp);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	ndp->flags |= NCSI_DEV_RESET;
	ndp->active_channel = active;
	ndp->active_package = active->package;
	spin_unlock_irqrestore(&ndp->lock, flags);

	nd->state = ncsi_dev_state_suspend;
	schedule_work(&ndp->work);

	return 0;
}

void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	disable_work_sync(&ndp->work);

	kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);