// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>

#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/addrconf.h>
#include <net/genetlink.h>

#include "internal.h"
#include "ncsi-pkt.h"
#include "ncsi-netlink.h"

LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);

bool ncsi_channel_has_link(struct ncsi_channel *channel)
{
	return !!(channel->modes[NCSI_MODE_LINK].data[2] & 0x1);
}

bool ncsi_channel_is_last(struct ncsi_dev_priv *ndp,
			  struct ncsi_channel *channel)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_PACKAGE(ndp, np)
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (nc == channel)
				continue;
			if (nc->state == NCSI_CHANNEL_ACTIVE &&
			    ncsi_channel_has_link(nc))
				return false;
		}

	return true;
}

static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE) {
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			if (ncsi_channel_has_link(nc)) {
				spin_unlock_irqrestore(&nc->lock, flags);
				nd->link_up = 1;
				goto report;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
	}

report:
	nd->handler(nd);
}
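
/* Channel monitor: a per-channel timer that polls link state roughly once a
 * second with a Get Link Status (GLS) command and advances a counter on each
 * run. The response path is expected to reset that counter; if it is not
 * reset before the WAIT window is exhausted, the channel is declared to have
 * timed out, link is reported down, a reshuffle (failover) is requested and
 * the channel is queued for reconfiguration.
 */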

static void ncsi_channel_monitor(struct timer_list *t)
{
	struct ncsi_channel *nc = from_timer(nc, t, monitor.timer);
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel_mode *ncm;
	struct ncsi_cmd_arg nca;
	bool enabled, chained;
	unsigned int monitor_state;
	unsigned long flags;
	int state, ret;

	spin_lock_irqsave(&nc->lock, flags);
	state = nc->state;
	chained = !list_empty(&nc->link);
	enabled = nc->monitor.enabled;
	monitor_state = nc->monitor.state;
	spin_unlock_irqrestore(&nc->lock, flags);

	if (!enabled || chained) {
		ncsi_stop_channel_monitor(nc);
		return;
	}
	if (state != NCSI_CHANNEL_INACTIVE &&
	    state != NCSI_CHANNEL_ACTIVE) {
		ncsi_stop_channel_monitor(nc);
		return;
	}

	switch (monitor_state) {
	case NCSI_CHANNEL_MONITOR_START:
	case NCSI_CHANNEL_MONITOR_RETRY:
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.req_flags = 0;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
		break;
	case NCSI_CHANNEL_MONITOR_WAIT ... NCSI_CHANNEL_MONITOR_WAIT_MAX:
		break;
	default:
		netdev_err(ndp->ndev.dev, "NCSI Channel %d timed out!\n",
			   nc->id);
		ncsi_report_link(ndp, true);
		ndp->flags |= NCSI_DEV_RESHUFFLE;

		ncsi_stop_channel_monitor(nc);

		ncm = &nc->modes[NCSI_MODE_LINK];
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INVISIBLE;
		ncm->data[2] &= ~0x1;
		spin_unlock_irqrestore(&nc->lock, flags);

		spin_lock_irqsave(&ndp->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	spin_lock_irqsave(&nc->lock, flags);
	nc->monitor.state++;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	WARN_ON_ONCE(nc->monitor.enabled);
	nc->monitor.enabled = true;
	nc->monitor.state = NCSI_CHANNEL_MONITOR_START;
	spin_unlock_irqrestore(&nc->lock, flags);

	mod_timer(&nc->monitor.timer, jiffies + HZ);
}

void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	if (!nc->monitor.enabled) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}
	nc->monitor.enabled = false;
	spin_unlock_irqrestore(&nc->lock, flags);

	del_timer_sync(&nc->monitor.timer);
}

struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
				       unsigned char id)
{
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_CHANNEL(np, nc) {
		if (nc->id == id)
			return nc;
	}

	return NULL;
}

struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
	struct ncsi_channel *nc, *tmp;
	int index;
	unsigned long flags;

	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
	if (!nc)
		return NULL;

	nc->id = id;
	nc->package = np;
	nc->state = NCSI_CHANNEL_INACTIVE;
	nc->monitor.enabled = false;
	timer_setup(&nc->monitor.timer, ncsi_channel_monitor, 0);
	spin_lock_init(&nc->lock);
	INIT_LIST_HEAD(&nc->link);
	for (index = 0; index < NCSI_CAP_MAX; index++)
		nc->caps[index].index = index;
	for (index = 0; index < NCSI_MODE_MAX; index++)
		nc->modes[index].index = index;

	spin_lock_irqsave(&np->lock, flags);
	tmp = ncsi_find_channel(np, id);
	if (tmp) {
		spin_unlock_irqrestore(&np->lock, flags);
		kfree(nc);
		return tmp;
	}

	list_add_tail_rcu(&nc->node, &np->channels);
	np->channel_num++;
	spin_unlock_irqrestore(&np->lock, flags);

	return nc;
}

static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);

	/* Release filters */
	kfree(nc->mac_filter.addrs);
	kfree(nc->vlan_filter.vids);

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}

struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
				       unsigned char id)
{
	struct ncsi_package *np;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (np->id == id)
			return np;
	}

	return NULL;
}

struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
				      unsigned char id)
{
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	np = kzalloc(sizeof(*np), GFP_ATOMIC);
	if (!np)
		return NULL;

	np->id = id;
	np->ndp = ndp;
	spin_lock_init(&np->lock);
	INIT_LIST_HEAD(&np->channels);
	np->channel_whitelist = UINT_MAX;

	spin_lock_irqsave(&ndp->lock, flags);
	tmp = ncsi_find_package(ndp, id);
	if (tmp) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		kfree(np);
		return tmp;
	}

	list_add_tail_rcu(&np->node, &ndp->packages);
	ndp->package_num++;
	spin_unlock_irqrestore(&ndp->lock, flags);

	return np;
}

void ncsi_remove_package(struct ncsi_package *np)
{
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel *nc, *tmp;
	unsigned long flags;

	/* Release all child channels */
	list_for_each_entry_safe(nc, tmp, &np->channels, node)
		ncsi_remove_channel(nc);

	/* Remove and free package */
	spin_lock_irqsave(&ndp->lock, flags);
	list_del_rcu(&np->node);
	ndp->package_num--;
	spin_unlock_irqrestore(&ndp->lock, flags);

	kfree(np);
}

void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
				   unsigned char id,
				   struct ncsi_package **np,
				   struct ncsi_channel **nc)
{
	struct ncsi_package *p;
	struct ncsi_channel *c;

	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;

	if (np)
		*np = p;
	if (nc)
		*nc = c;
}
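
/* NCSI addresses a channel with a single byte that packs the package index
 * and the channel index together; NCSI_PACKAGE_INDEX() and
 * NCSI_CHANNEL_INDEX() (see internal.h) split that byte back into the two
 * lookups performed above.
 */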

/* For two consecutive NCSI commands, the packet IDs shouldn't be the same;
 * otherwise a stale response could be matched to the wrong command. So the
 * available IDs are allocated in round-robin fashion.
 */
struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp,
					unsigned int req_flags)
{
	struct ncsi_request *nr = NULL;
	int i, limit = ARRAY_SIZE(ndp->requests);
	unsigned long flags;

	/* Check if there is one available request until the ceiling */
	spin_lock_irqsave(&ndp->lock, flags);
	for (i = ndp->request_id; i < limit; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

	/* Fall back to checking from the starting cursor */
	for (i = NCSI_REQ_START_IDX; i < ndp->request_id; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->flags = req_flags;
		ndp->request_id = i + 1;
		goto found;
	}

found:
	spin_unlock_irqrestore(&ndp->lock, flags);
	return nr;
}

void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = !!(nr->flags & NCSI_REQ_FLAG_EVENT_DRIVEN);
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}

struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
{
	struct ncsi_dev_priv *ndp;

	NCSI_FOR_EACH_DEV(ndp) {
		if (ndp->ndev.dev == dev)
			return &ndp->ndev;
	}

	return NULL;
}

static void ncsi_request_timeout(struct timer_list *t)
{
	struct ncsi_request *nr = from_timer(nr, t, timer);
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct ncsi_cmd_pkt *cmd;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	/* If the request already has an associated response,
	 * let the response handler release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (nr->flags == NCSI_REQ_FLAG_NETLINK_DRIVEN) {
		if (nr->cmd) {
			/* Find the package */
			cmd = (struct ncsi_cmd_pkt *)
			      skb_network_header(nr->cmd);
			ncsi_find_package_and_channel(ndp,
						      cmd->cmd.common.channel,
						      &np, &nc);
			ncsi_send_netlink_timeout(nr, np, nc);
		}
	}

	/* Release the request */
	ncsi_free_request(nr);
}
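
/* Suspend state machine: tear an active channel down by stepping through
 * suspend -> select package (SP) -> [GLS on every channel when a failover is
 * pending] -> Disable Channel Network Tx (DCNT) -> Disable Channel (DC) ->
 * Deselect Package (DP, skipped if a sibling channel is still active) ->
 * done. Each step transmits one NCSI command; the response handling
 * re-schedules the work item for the next step.
 */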

static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc, *tmp;
	struct ncsi_cmd_arg nca;
	unsigned long flags;
	int ret;

	np = ndp->active_package;
	nc = ndp->active_channel;
	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
		fallthrough;
	case ncsi_dev_state_suspend_select:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;

		/* When the active channel needs to fail over, retrieve the
		 * last link states of the channels in this package: another
		 * channel may be selected as the next active one, and its
		 * link state is the most important factor in that choice.
		 * The link state of inactive channels can't be updated by
		 * LSC AENs in time, so query it explicitly.
		 */
		if (ndp->flags & NCSI_DEV_RESHUFFLE)
			nd->state = ncsi_dev_state_suspend_gls;
		else
			nd->state = ncsi_dev_state_suspend_dcnt;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_gls:
		ndp->pending_req_num = np->channel_num;

		nca.type = NCSI_PKT_CMD_GLS;
		nca.package = np->id;

		nd->state = ncsi_dev_state_suspend_dcnt;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		break;
	case ncsi_dev_state_suspend_dcnt:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DCNT;
		nca.package = np->id;
		nca.channel = nc->id;

		nd->state = ncsi_dev_state_suspend_dc;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_dc:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DC;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.bytes[0] = 1;

		nd->state = ncsi_dev_state_suspend_deselect;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		NCSI_FOR_EACH_CHANNEL(np, tmp) {
			/* If there is another channel active on this package
			 * do not deselect the package.
			 */
			if (tmp != nc && tmp->state == NCSI_CHANNEL_ACTIVE) {
				nd->state = ncsi_dev_state_suspend_done;
				break;
			}
		}
		break;
	case ncsi_dev_state_suspend_deselect:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_DP;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;

		nd->state = ncsi_dev_state_suspend_done;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		break;
	case ncsi_dev_state_suspend_done:
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_INACTIVE;
		spin_unlock_irqrestore(&nc->lock, flags);
		if (ndp->flags & NCSI_DEV_RESET)
			ncsi_reset_dev(nd);
		else
			ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}

	return;
error:
	nd->state = ncsi_dev_state_functional;
}

/* Check the VLAN filter bitmap for a set filter, and construct a
 * "Set VLAN Filter - Disable" packet if found.
 */
static int clear_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
			 struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	unsigned long flags;
	void *bitmap;
	int index;
	u16 vid;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);
	index = find_next_bit(bitmap, ncf->n_vids, 0);
	if (index >= ncf->n_vids) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}
	vid = ncf->vids[index];

	clear_bit(index, bitmap);
	ncf->vids[index] = 0;
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x00;

	return 0;
}

/* Find an outstanding VLAN tag and construct a "Set VLAN Filter - Enable"
 * packet.
 */
static int set_one_vid(struct ncsi_dev_priv *ndp, struct ncsi_channel *nc,
		       struct ncsi_cmd_arg *nca)
{
	struct ncsi_channel_vlan_filter *ncf;
	struct vlan_vid *vlan = NULL;
	unsigned long flags;
	int i, index;
	void *bitmap;
	u16 vid;

	if (list_empty(&ndp->vlan_vids))
		return -1;

	ncf = &nc->vlan_filter;
	bitmap = &ncf->bitmap;

	spin_lock_irqsave(&nc->lock, flags);

	/* Pick the first VID we know about that the channel doesn't
	 * already carry in its filter table.
	 */
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		vid = vlan->vid;
		for (i = 0; i < ncf->n_vids; i++)
			if (ncf->vids[i] == vid) {
				vid = 0;
				break;
			}
		if (vid)
			break;
	}

	if (!vid) {
		/* No new VLAN ID to set */
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	index = find_next_zero_bit(bitmap, ncf->n_vids, 0);
	if (index < 0 || index >= ncf->n_vids) {
		netdev_err(ndp->ndev.dev,
			   "Channel %u already has all VLAN filters set\n",
			   nc->id);
		spin_unlock_irqrestore(&nc->lock, flags);
		return -1;
	}

	ncf->vids[index] = vid;
	set_bit(index, bitmap);
	spin_unlock_irqrestore(&nc->lock, flags);

	nca->type = NCSI_PKT_CMD_SVF;
	nca->words[1] = vid;
	/* HW filter index starts at 1 */
	nca->bytes[6] = index + 1;
	nca->bytes[7] = 0x01;

	return 0;
}
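
/* OEM "Get MAC Address" (GMA) support: when CONFIG_NCSI_OEM_CMD_GET_MAC is
 * enabled, the configure path asks the NIC for its MAC address with a
 * vendor-specific OEM command so the host interface can adopt it in the
 * response path. The handlers below build the manufacturer-specific
 * payloads (Broadcom and Mellanox).
 */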

#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)

/* NCSI OEM Command APIs */
static int ncsi_oem_gma_handler_bcm(struct ncsi_cmd_arg *nca)
{
	unsigned char data[NCSI_OEM_BCM_CMD_GMA_LEN];
	int ret = 0;

	nca->payload = NCSI_OEM_BCM_CMD_GMA_LEN;

	memset(data, 0, NCSI_OEM_BCM_CMD_GMA_LEN);
	*(unsigned int *)data = ntohl(NCSI_OEM_MFR_BCM_ID);
	data[5] = NCSI_OEM_BCM_CMD_GMA;

	nca->data = data;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

static int ncsi_oem_gma_handler_mlx(struct ncsi_cmd_arg *nca)
{
	union {
		u8 data_u8[NCSI_OEM_MLX_CMD_GMA_LEN];
		u32 data_u32[NCSI_OEM_MLX_CMD_GMA_LEN / sizeof(u32)];
	} u;
	int ret = 0;

	nca->payload = NCSI_OEM_MLX_CMD_GMA_LEN;

	memset(&u, 0, sizeof(u));
	u.data_u32[0] = ntohl(NCSI_OEM_MFR_MLX_ID);
	u.data_u8[5] = NCSI_OEM_MLX_CMD_GMA;
	u.data_u8[6] = NCSI_OEM_MLX_CMD_GMA_PARAM;

	nca->data = u.data_u8;

	ret = ncsi_xmit_cmd(nca);
	if (ret)
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: Failed to transmit cmd 0x%x during configure\n",
			   nca->type);
	return ret;
}

/* OEM Command handlers initialization */
static struct ncsi_oem_gma_handler {
	unsigned int	mfr_id;
	int		(*handler)(struct ncsi_cmd_arg *nca);
} ncsi_oem_gma_handlers[] = {
	{ NCSI_OEM_MFR_BCM_ID, ncsi_oem_gma_handler_bcm },
	{ NCSI_OEM_MFR_MLX_ID, ncsi_oem_gma_handler_mlx }
};

static int ncsi_gma_handler(struct ncsi_cmd_arg *nca, unsigned int mf_id)
{
	struct ncsi_oem_gma_handler *nch = NULL;
	int i;

	/* This function should only be called once, return if flag set */
	if (nca->ndp->gma_flag == 1)
		return -1;

	/* Find the GMA handler for the given manufacturer id */
	for (i = 0; i < ARRAY_SIZE(ncsi_oem_gma_handlers); i++) {
		if (ncsi_oem_gma_handlers[i].mfr_id == mf_id) {
			if (ncsi_oem_gma_handlers[i].handler)
				nch = &ncsi_oem_gma_handlers[i];
			break;
		}
	}

	if (!nch) {
		netdev_err(nca->ndp->ndev.dev,
			   "NCSI: No GMA handler available for MFR-ID (0x%x)\n",
			   mf_id);
		return -1;
	}

	/* Set the flag for the GMA command, which should only be sent once */
	nca->ndp->gma_flag = 1;

	/* Get the MAC address from the NCSI device */
	return nch->handler(nca);
}

#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */

/* Determine if a given channel from the channel_queue should be used for Tx */
static bool ncsi_channel_is_tx(struct ncsi_dev_priv *ndp,
			       struct ncsi_channel *nc)
{
	struct ncsi_channel_mode *ncm;
	struct ncsi_channel *channel;
	struct ncsi_package *np;

	/* Check if any other channel has Tx enabled; a channel may have
	 * already been configured and removed from the channel queue.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!ndp->multi_package && np != nc->package)
			continue;
		NCSI_FOR_EACH_CHANNEL(np, channel) {
			ncm = &channel->modes[NCSI_MODE_TX_ENABLE];
			if (ncm->enable)
				return false;
		}
	}

	/* This channel is the preferred channel and has link */
	list_for_each_entry_rcu(channel, &ndp->channel_queue, link) {
		np = channel->package;
		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			return np->preferred_channel == nc;
		}
	}

	/* This channel has link */
	if (ncsi_channel_has_link(nc))
		return true;

	list_for_each_entry_rcu(channel, &ndp->channel_queue, link)
		if (ncsi_channel_has_link(channel))
			return false;

	/* No other channel has link; default to this one */
	return true;
}
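
/* Switching Tx is a two-command sequence: Disable Channel Network Tx (DCNT)
 * on the previously enabled channel, then Enable Channel Network Tx (ECNT)
 * on the new one, so exactly one channel carries pass-through traffic at a
 * time.
 */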

/* Change the active Tx channel in a multi-channel setup */
int ncsi_update_tx_channel(struct ncsi_dev_priv *ndp,
			   struct ncsi_package *package,
			   struct ncsi_channel *disable,
			   struct ncsi_channel *enable)
{
	struct ncsi_cmd_arg nca;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	int ret = 0;

	if (!package->multi_channel && !ndp->multi_package)
		netdev_warn(ndp->ndev.dev,
			    "NCSI: Trying to update Tx channel in single-channel mode\n");
	nca.ndp = ndp;
	nca.req_flags = 0;

	/* Find current channel with Tx enabled */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (disable)
			break;
		if (!ndp->multi_package && np != package)
			continue;

		NCSI_FOR_EACH_CHANNEL(np, nc)
			if (nc->modes[NCSI_MODE_TX_ENABLE].enable) {
				disable = nc;
				break;
			}
	}

	/* Find a suitable channel for Tx */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (enable)
			break;
		if (!ndp->multi_package && np != package)
			continue;
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;

		if (np->preferred_channel &&
		    ncsi_channel_has_link(np->preferred_channel)) {
			enable = np->preferred_channel;
			break;
		}

		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & 0x1 << nc->id))
				continue;
			if (nc->state != NCSI_CHANNEL_ACTIVE)
				continue;
			if (ncsi_channel_has_link(nc)) {
				enable = nc;
				break;
			}
		}
	}

	if (disable == enable)
		return -1;

	if (!enable)
		return -1;

	if (disable) {
		nca.channel = disable->id;
		nca.package = disable->package->id;
		nca.type = NCSI_PKT_CMD_DCNT;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			netdev_err(ndp->ndev.dev,
				   "Error %d sending DCNT\n",
				   ret);
	}

	netdev_info(ndp->ndev.dev, "NCSI: channel %u enables Tx\n", enable->id);

	nca.channel = enable->id;
	nca.package = enable->package->id;
	nca.type = NCSI_PKT_CMD_ECNT;
	ret = ncsi_xmit_cmd(&nca);
	if (ret)
		netdev_err(ndp->ndev.dev,
			   "Error %d sending ECNT\n",
			   ret);

	return ret;
}
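
/* Configure state machine: bring a channel up by stepping through
 * SP -> CIS -> OEM GMA (optional) -> clear/set VLAN filters -> EV/DV ->
 * SMA -> EBF -> DGMF (if global multicast filtering must be disabled) ->
 * ECNT (only on the channel selected for Tx) -> EC -> AE (if AENs are
 * supported) -> GLS -> done. As in the suspend path, each state sends one
 * command and the response handling re-schedules the work item.
 */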

static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_channel *hot_nc = NULL;
	struct ncsi_dev *nd = &ndp->ndev;
	struct net_device *dev = nd->dev;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	unsigned long flags;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_config:
	case ncsi_dev_state_config_sp:
		ndp->pending_req_num = 1;

		/* Select the specific package */
		nca.type = NCSI_PKT_CMD_SP;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;
		nca.package = np->id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_SP\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_cis;
		break;
	case ncsi_dev_state_config_cis:
		ndp->pending_req_num = 1;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = np->id;
		nca.channel = nc->id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD_CIS\n");
			goto error;
		}

		nd->state = ncsi_dev_state_config_oem_gma;
		break;
	case ncsi_dev_state_config_oem_gma:
		nd->state = ncsi_dev_state_config_clear_vids;
		ret = -1;

#if IS_ENABLED(CONFIG_NCSI_OEM_CMD_GET_MAC)
		nca.type = NCSI_PKT_CMD_OEM;
		nca.package = np->id;
		nca.channel = nc->id;
		ndp->pending_req_num = 1;
		ret = ncsi_gma_handler(&nca, nc->version.mf_id);
#endif /* CONFIG_NCSI_OEM_CMD_GET_MAC */

		if (ret < 0)
			schedule_work(&ndp->work);

		break;
	case ncsi_dev_state_config_clear_vids:
	case ncsi_dev_state_config_svf:
	case ncsi_dev_state_config_ev:
	case ncsi_dev_state_config_sma:
	case ncsi_dev_state_config_ebf:
	case ncsi_dev_state_config_dgmf:
	case ncsi_dev_state_config_ecnt:
	case ncsi_dev_state_config_ec:
	case ncsi_dev_state_config_ae:
	case ncsi_dev_state_config_gls:
		ndp->pending_req_num = 1;

		nca.package = np->id;
		nca.channel = nc->id;

		/* Clear any active filters on the channel before setting */
		if (nd->state == ncsi_dev_state_config_clear_vids) {
			ret = clear_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_svf;
				schedule_work(&ndp->work);
				break;
			}
			nd->state = ncsi_dev_state_config_clear_vids;
		/* Add known VLAN tags to the filter */
		} else if (nd->state == ncsi_dev_state_config_svf) {
			ret = set_one_vid(ndp, nc, &nca);
			if (ret) {
				nd->state = ncsi_dev_state_config_ev;
				schedule_work(&ndp->work);
				break;
			}
			nd->state = ncsi_dev_state_config_svf;
		/* Enable/Disable the VLAN filter */
		} else if (nd->state == ncsi_dev_state_config_ev) {
			if (list_empty(&ndp->vlan_vids)) {
				nca.type = NCSI_PKT_CMD_DV;
			} else {
				nca.type = NCSI_PKT_CMD_EV;
				nca.bytes[3] = NCSI_CAP_VLAN_NO;
			}
			nd->state = ncsi_dev_state_config_sma;
		} else if (nd->state == ncsi_dev_state_config_sma) {
			/* Use the first entry in the unicast filter table.
			 * Note that the MAC filter table starts from entry 1
			 * instead of 0.
			 */
			nca.type = NCSI_PKT_CMD_SMA;
			for (index = 0; index < 6; index++)
				nca.bytes[index] = dev->dev_addr[index];
			nca.bytes[index] = 0x1;
			nd->state = ncsi_dev_state_config_ebf;
		} else if (nd->state == ncsi_dev_state_config_ebf) {
			nca.type = NCSI_PKT_CMD_EBF;
			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
			/* If multicast global filtering is supported then
			 * disable it so that all multicast packets will be
			 * forwarded to the management controller.
			 */
			if (nc->caps[NCSI_CAP_GENERIC].cap &
			    NCSI_CAP_GENERIC_MC)
				nd->state = ncsi_dev_state_config_dgmf;
			else if (ncsi_channel_is_tx(ndp, nc))
				nd->state = ncsi_dev_state_config_ecnt;
			else
				nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_dgmf) {
			nca.type = NCSI_PKT_CMD_DGMF;
			if (ncsi_channel_is_tx(ndp, nc))
				nd->state = ncsi_dev_state_config_ecnt;
			else
				nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ecnt) {
			if (np->preferred_channel &&
			    nc != np->preferred_channel)
				netdev_info(ndp->ndev.dev,
					    "NCSI: Tx failed over to channel %u\n",
					    nc->id);
			nca.type = NCSI_PKT_CMD_ECNT;
			nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ec) {
			/* Enable AEN if it's supported */
			nca.type = NCSI_PKT_CMD_EC;
			nd->state = ncsi_dev_state_config_ae;
			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
				nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_ae) {
			nca.type = NCSI_PKT_CMD_AE;
			nca.bytes[0] = 0;
			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
			nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_gls) {
			nca.type = NCSI_PKT_CMD_GLS;
			nd->state = ncsi_dev_state_config_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev,
				   "NCSI: Failed to transmit CMD %x\n",
				   nca.type);
			goto error;
		}
		break;
	case ncsi_dev_state_config_done:
		netdev_dbg(ndp->ndev.dev, "NCSI: channel %u config done\n",
			   nc->id);
		spin_lock_irqsave(&nc->lock, flags);
		nc->state = NCSI_CHANNEL_ACTIVE;

		if (ndp->flags & NCSI_DEV_RESET) {
			/* A reset event happened during config, start it now */
			nc->reconfigure_needed = false;
			spin_unlock_irqrestore(&nc->lock, flags);
			ncsi_reset_dev(nd);
			break;
		}

		if (nc->reconfigure_needed) {
			/* This channel's configuration has been updated
			 * part-way during the config state - start the
			 * channel configuration over.
			 */
			nc->reconfigure_needed = false;
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(dev, "Dirty NCSI channel state reset\n");
			ncsi_process_next_channel(ndp);
			break;
		}

		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
			hot_nc = nc;
		} else {
			hot_nc = NULL;
			netdev_dbg(ndp->ndev.dev,
				   "NCSI: channel %u link down after config\n",
				   nc->id);
		}
		spin_unlock_irqrestore(&nc->lock, flags);

		/* Update the hot channel */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->hot_channel = hot_nc;
		spin_unlock_irqrestore(&ndp->lock, flags);

		ncsi_start_channel_monitor(nc);
		ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_alert(dev, "Wrong NCSI state 0x%x in config\n",
			     nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}

static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc, *found, *hot_nc;
	struct ncsi_channel_mode *ncm;
	unsigned long flags, cflags;
	struct ncsi_package *np;
	bool with_link;

	spin_lock_irqsave(&ndp->lock, flags);
	hot_nc = ndp->hot_channel;
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* By default the search is done once an inactive channel with up
	 * link is found, unless a preferred channel is set.
	 * If multi_package or multi_channel are configured, all channels in
	 * the whitelist are added to the channel queue.
	 */
	found = NULL;
	with_link = false;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (!(ndp->package_whitelist & (0x1 << np->id)))
			continue;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!(np->channel_whitelist & (0x1 << nc->id)))
				continue;

			spin_lock_irqsave(&nc->lock, cflags);

			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE) {
				spin_unlock_irqrestore(&nc->lock, cflags);
				continue;
			}

			if (!found)
				found = nc;
			if (nc == hot_nc)
				found = nc;

			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				found = nc;
				with_link = true;
			}

			/* If multi_channel is enabled configure all valid
			 * channels whether or not they currently have link
			 * so they will have AENs enabled.
			 */
			if (with_link || np->multi_channel) {
				spin_lock_irqsave(&ndp->lock, flags);
				list_add_tail_rcu(&nc->link,
						  &ndp->channel_queue);
				spin_unlock_irqrestore(&ndp->lock, flags);

				netdev_dbg(ndp->ndev.dev,
					   "NCSI: Channel %u added to queue (link %s)\n",
					   nc->id,
					   ncm->data[2] & 0x1 ? "up" : "down");
			}

			spin_unlock_irqrestore(&nc->lock, cflags);

			if (with_link && !np->multi_channel)
				break;
		}
		if (with_link && !ndp->multi_package)
			break;
	}

	if (list_empty(&ndp->channel_queue) && found) {
		netdev_info(ndp->ndev.dev,
			    "NCSI: No channel with link found, configuring channel %u\n",
			    found->id);
		spin_lock_irqsave(&ndp->lock, flags);
		list_add_tail_rcu(&found->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
	} else if (!found) {
		netdev_warn(ndp->ndev.dev,
			    "NCSI: No channel found to configure!\n");
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

	return ncsi_process_next_channel(ndp);
}

static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned int cap;
	bool has_channel = false;

	/* Hardware arbitration is disabled if any channel doesn't
	 * explicitly support it.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			has_channel = true;

			cap = nc->caps[NCSI_CAP_GENERIC].cap;
			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
				ndp->flags &= ~NCSI_DEV_HWA;
				return false;
			}
		}
	}

	if (has_channel) {
		ndp->flags |= NCSI_DEV_HWA;
		return true;
	}

	ndp->flags &= ~NCSI_DEV_HWA;
	return false;
}
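
/* Probe state machine: deselect all eight possible packages, then for each
 * package in turn select it, clear the initial state of its channels and
 * query version (GVI), capabilities (GC) and link status (GLS) before
 * deselecting it again. Once every package has been probed, hardware
 * arbitration support is evaluated and an active channel is chosen.
 */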

static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.req_flags = NCSI_REQ_FLAG_EVENT_DRIVEN;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
		fallthrough;
	case ncsi_dev_state_probe_deselect:
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = NCSI_RESERVED_CHANNEL;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		ndp->pending_req_num = 1;

		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		ndp->active_package = ncsi_find_package(ndp,
							ndp->package_probe_id);
		if (!ndp->active_package) {
			/* No response from the package; move on */
			nd->state = ncsi_dev_state_probe_dp;
			schedule_work(&ndp->work);
			break;
		}
		nd->state = ncsi_dev_state_probe_cis;
		schedule_work(&ndp->work);
		break;
	case ncsi_dev_state_probe_cis:
		ndp->pending_req_num = NCSI_RESERVED_CHANNEL;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = ndp->active_package->id;
		for (index = 0; index < NCSI_RESERVED_CHANNEL; index++) {
			nca.channel = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = np->channel_num;

		/* Retrieve version, capability or link status */
		if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		if (nd->state == ncsi_dev_state_probe_gvi)
			nd->state = ncsi_dev_state_probe_gc;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nd->state = ncsi_dev_state_probe_gls;
		else
			nd->state = ncsi_dev_state_probe_dp;
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the current package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->package_probe_id;
		nca.channel = NCSI_RESERVED_CHANNEL;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Probe next package */
		ndp->package_probe_id++;
		if (ndp->package_probe_id >= 8) {
			/* Probe finished */
			ndp->flags |= NCSI_DEV_PROBED;
			break;
		}
		nd->state = ncsi_dev_state_probe_package;
		ndp->active_package = NULL;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%0x in enumeration\n",
			    nd->state);
	}

	if (ndp->flags & NCSI_DEV_PROBED) {
		/* Check if all packages have HWA support */
		ncsi_check_hwa(ndp);
		ncsi_choose_active_channel(ndp);
	}

	return;
error:
	netdev_err(ndp->ndev.dev,
		   "NCSI: Failed to transmit cmd 0x%x during probe\n",
		   nca.type);
	ncsi_report_link(ndp, true);
}

static void ncsi_dev_work(struct work_struct *work)
{
	struct ncsi_dev_priv *ndp = container_of(work,
						 struct ncsi_dev_priv, work);
	struct ncsi_dev *nd = &ndp->ndev;

	switch (nd->state & ncsi_dev_state_major) {
	case ncsi_dev_state_probe:
		ncsi_probe_channel(ndp);
		break;
	case ncsi_dev_state_suspend:
		ncsi_suspend_channel(ndp);
		break;
	case ncsi_dev_state_config:
		ncsi_configure_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
			    nd->state);
	}
}

int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	list_del_init(&nc->link);
	spin_unlock_irqrestore(&ndp->lock, flags);

	spin_lock_irqsave(&nc->lock, flags);
	old_state = nc->state;
	nc->state = NCSI_CHANNEL_INVISIBLE;
	spin_unlock_irqrestore(&nc->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		netdev_dbg(ndp->ndev.dev, "NCSI: configuring channel %u\n",
			   nc->id);
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
		netdev_dbg(ndp->ndev.dev, "NCSI: suspending channel %u\n",
			   nc->id);
		ncsi_suspend_channel(ndp);
		break;
	default:
		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
			   old_state, nc->package->id, nc->id);
		ncsi_report_link(ndp, false);
		return -EINVAL;
	}

	return 0;

out:
	ndp->active_channel = NULL;
	ndp->active_package = NULL;
	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
		return ncsi_choose_active_channel(ndp);
	}

	ncsi_report_link(ndp, false);
	return -ENODEV;
}

static int ncsi_kick_channels(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_channel *nc;
	struct ncsi_package *np;
	unsigned long flags;
	unsigned int n = 0;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			/* Channels may be busy, mark dirty instead of
			 * kicking if:
			 * a) not ACTIVE (configured)
			 * b) in the channel_queue (to be configured)
			 * c) its ndev is in the config state
			 */
			if (nc->state != NCSI_CHANNEL_ACTIVE) {
				if ((ndp->ndev.state & 0xff00) ==
				    ncsi_dev_state_config ||
				    !list_empty(&nc->link)) {
					netdev_dbg(nd->dev,
						   "NCSI: channel %p marked dirty\n",
						   nc);
					nc->reconfigure_needed = true;
				}
				spin_unlock_irqrestore(&nc->lock, flags);
				continue;
			}

			spin_unlock_irqrestore(&nc->lock, flags);

			ncsi_stop_channel_monitor(nc);
			spin_lock_irqsave(&nc->lock, flags);
			nc->state = NCSI_CHANNEL_INACTIVE;
			spin_unlock_irqrestore(&nc->lock, flags);

			spin_lock_irqsave(&ndp->lock, flags);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
			spin_unlock_irqrestore(&ndp->lock, flags);

			netdev_dbg(nd->dev, "NCSI: kicked channel %p\n", nc);
			n++;
		}
	}

	return n;
}
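
/* VLAN filter updates from the stack. The two functions below are exported
 * so an Ethernet driver using NCSI can wire them up as its
 * .ndo_vlan_rx_add_vid and .ndo_vlan_rx_kill_vid callbacks; changed VIDs are
 * stored on the device and pushed to the channels by kicking them through
 * reconfiguration.
 */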

int ncsi_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct ncsi_dev_priv *ndp;
	unsigned int n_vids = 0;
	struct vlan_vid *vlan;
	struct ncsi_dev *nd;
	bool found = false;

	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: No net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Add the VLAN id to our internal list */
	list_for_each_entry_rcu(vlan, &ndp->vlan_vids, list) {
		n_vids++;
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u already registered\n",
				   vid);
			return 0;
		}
	}
	if (n_vids >= NCSI_MAX_VLAN_VIDS) {
		netdev_warn(dev,
			    "tried to add vlan id %u but NCSI max already registered (%u)\n",
			    vid, NCSI_MAX_VLAN_VIDS);
		return -ENOSPC;
	}

	vlan = kzalloc(sizeof(*vlan), GFP_KERNEL);
	if (!vlan)
		return -ENOMEM;

	vlan->proto = proto;
	vlan->vid = vid;
	list_add_rcu(&vlan->list, &ndp->vlan_vids);

	netdev_dbg(dev, "NCSI: Added new vid %u\n", vid);

	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_add_vid);

int ncsi_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
{
	struct vlan_vid *vlan, *tmp;
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	bool found = false;

	if (vid == 0)
		return 0;

	nd = ncsi_find_dev(dev);
	if (!nd) {
		netdev_warn(dev, "NCSI: no net_device?\n");
		return 0;
	}

	ndp = TO_NCSI_DEV_PRIV(nd);

	/* Remove the VLAN id from our internal list */
	list_for_each_entry_safe(vlan, tmp, &ndp->vlan_vids, list)
		if (vlan->vid == vid) {
			netdev_dbg(dev, "NCSI: vid %u found, removing\n", vid);
			list_del_rcu(&vlan->list);
			found = true;
			kfree(vlan);
		}

	if (!found) {
		netdev_err(dev, "NCSI: vid %u wasn't registered!\n", vid);
		return -EINVAL;
	}

	found = ncsi_kick_channels(ndp) != 0;

	return found ? ncsi_process_next_channel(ndp) : 0;
}
EXPORT_SYMBOL_GPL(ncsi_vlan_rx_kill_vid);
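
/* Driver-facing lifecycle: ncsi_register_dev() attaches NCSI state and the
 * packet handler to a net_device, ncsi_start_dev() kicks off probing (or a
 * reset when the topology is already known), ncsi_stop_dev() quiesces the
 * channel monitors, and ncsi_unregister_dev() tears everything down again.
 */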

struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_LIST_HEAD(&ndp->vlan_vids);
	INIT_WORK(&ndp->work, ncsi_dev_work);
	ndp->package_whitelist = UINT_MAX;

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = NCSI_REQ_START_IDX;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		timer_setup(&ndp->requests[i].timer, ncsi_request_timeout, 0);
	}

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	/* Set up generic netlink interface */
	ncsi_init_netlink(dev);

	return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);

int ncsi_start_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);

	if (nd->state != ncsi_dev_state_registered &&
	    nd->state != ncsi_dev_state_functional)
		return -ENOTTY;

	if (!(ndp->flags & NCSI_DEV_PROBED)) {
		ndp->package_probe_id = 0;
		nd->state = ncsi_dev_state_probe;
		schedule_work(&ndp->work);
		return 0;
	}

	return ncsi_reset_dev(nd);
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);

void ncsi_stop_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	bool chained;
	int old_state;
	unsigned long flags;

	/* Stop the channel monitor on any active channels. Don't reset the
	 * channel state so we know which were active when ncsi_start_dev()
	 * is next called.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			ncsi_stop_channel_monitor(nc);

			spin_lock_irqsave(&nc->lock, flags);
			chained = !list_empty(&nc->link);
			old_state = nc->state;
			spin_unlock_irqrestore(&nc->lock, flags);

			WARN_ON_ONCE(chained ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	netdev_dbg(ndp->ndev.dev, "NCSI: Stopping device\n");
	ncsi_report_link(ndp, true);
}
EXPORT_SYMBOL_GPL(ncsi_stop_dev);

int ncsi_reset_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_channel *nc, *active, *tmp;
	struct ncsi_package *np;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);

	if (!(ndp->flags & NCSI_DEV_RESET)) {
		/* Haven't been called yet, check states */
		switch (nd->state & ncsi_dev_state_major) {
		case ncsi_dev_state_registered:
		case ncsi_dev_state_probe:
			/* Not even probed yet - do nothing */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		case ncsi_dev_state_suspend:
		case ncsi_dev_state_config:
			/* Wait for the channel to finish its suspend/config
			 * operation; once it finishes it will check for
			 * NCSI_DEV_RESET and reset the state.
			 */
			ndp->flags |= NCSI_DEV_RESET;
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	} else {
		switch (nd->state) {
		case ncsi_dev_state_suspend_done:
		case ncsi_dev_state_config_done:
		case ncsi_dev_state_functional:
			/* Ok */
			break;
		default:
			/* Current reset operation happening */
			spin_unlock_irqrestore(&ndp->lock, flags);
			return 0;
		}
	}

	if (!list_empty(&ndp->channel_queue)) {
		/* Clear any channel queue we may have interrupted */
		list_for_each_entry_safe(nc, tmp, &ndp->channel_queue, link)
			list_del_init(&nc->link);
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	active = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			spin_lock_irqsave(&nc->lock, flags);

			if (nc->state == NCSI_CHANNEL_ACTIVE) {
				active = nc;
				nc->state = NCSI_CHANNEL_INVISIBLE;
				spin_unlock_irqrestore(&nc->lock, flags);
				ncsi_stop_channel_monitor(nc);
				break;
			}

			spin_unlock_irqrestore(&nc->lock, flags);
		}
		if (active)
			break;
	}

	if (!active) {
		/* Done */
		spin_lock_irqsave(&ndp->lock, flags);
		ndp->flags &= ~NCSI_DEV_RESET;
		spin_unlock_irqrestore(&ndp->lock, flags);
		return ncsi_choose_active_channel(ndp);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	ndp->flags |= NCSI_DEV_RESET;
	ndp->active_channel = active;
	ndp->active_package = active->package;
	spin_unlock_irqrestore(&ndp->lock, flags);

	nd->state = ncsi_dev_state_suspend;
	schedule_work(&ndp->work);

	return 0;
}

void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	ncsi_unregister_netlink(nd->dev);

	kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);