/*
 * Copyright Gavin Shan, IBM Corporation 2016.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/netlink.h>

#include <net/ncsi.h>
#include <net/net_namespace.h>
#include <net/sock.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>

#include "internal.h"
#include "ncsi-pkt.h"
LIST_HEAD(ncsi_dev_list);
DEFINE_SPINLOCK(ncsi_dev_lock);

static inline int ncsi_filter_size(int table)
{
	int sizes[] = { 2, 6, 6, 6 };

	BUILD_BUG_ON(ARRAY_SIZE(sizes) != NCSI_FILTER_MAX);
	if (table < NCSI_FILTER_BASE || table >= NCSI_FILTER_MAX)
		return -EINVAL;

	return sizes[table];
}
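/* Look up @data in one of the channel's filter tables. Returns the
 * entry index on success, -ENXIO if the table hasn't been allocated,
 * or -ENOENT if no matching entry exists.
 */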
int ncsi_find_filter(struct ncsi_channel *nc, int table, void *data)
{
	struct ncsi_channel_filter *ncf;
	void *bitmap;
	int index, size;
	unsigned long flags;

	ncf = nc->filters[table];
	if (!ncf)
		return -ENXIO;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	index = -1;
	while ((index = find_next_bit(bitmap, ncf->total, index + 1))
	       < ncf->total) {
		if (!memcmp(ncf->data + size * index, data, size)) {
			spin_unlock_irqrestore(&nc->lock, flags);
			return index;
		}
	}
	spin_unlock_irqrestore(&nc->lock, flags);

	return -ENOENT;
}
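/* Install @data into one of the channel's filter tables, reusing an
 * existing matching entry when there is one. Returns the entry index,
 * or a negative errno (e.g. -ENOSPC when the table is full).
 */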
int ncsi_add_filter(struct ncsi_channel *nc, int table, void *data)
{
	struct ncsi_channel_filter *ncf;
	int index, size;
	void *bitmap;
	unsigned long flags;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	index = ncsi_find_filter(nc, table, data);
	if (index >= 0)
		return index;

	ncf = nc->filters[table];
	if (!ncf)
		return -ENODEV;

	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	do {
		index = find_next_zero_bit(bitmap, ncf->total, 0);
		if (index >= ncf->total) {
			spin_unlock_irqrestore(&nc->lock, flags);
			return -ENOSPC;
		}
	} while (test_and_set_bit(index, bitmap));

	memcpy(ncf->data + size * index, data, size);
	spin_unlock_irqrestore(&nc->lock, flags);

	return index;
}
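/* Release the entry at @index from one of the channel's filter tables. */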
int ncsi_remove_filter(struct ncsi_channel *nc, int table, int index)
{
	struct ncsi_channel_filter *ncf;
	int size;
	void *bitmap;
	unsigned long flags;

	size = ncsi_filter_size(table);
	if (size < 0)
		return size;

	ncf = nc->filters[table];
	if (!ncf || index >= ncf->total)
		return -ENODEV;

	spin_lock_irqsave(&nc->lock, flags);
	bitmap = (void *)&ncf->bitmap;
	if (test_and_clear_bit(index, bitmap))
		memset(ncf->data + size * index, 0, size);
	spin_unlock_irqrestore(&nc->lock, flags);

	return 0;
}
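/* Report the aggregated link state to the consumer through the
 * registered handler. With @force_down set, the link is always
 * reported as down, regardless of the per-channel link status.
 */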
static void ncsi_report_link(struct ncsi_dev_priv *ndp, bool force_down)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;

	nd->state = ncsi_dev_state_functional;
	if (force_down) {
		nd->link_up = 0;
		goto report;
	}

	nd->link_up = 0;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_ACTIVE)
				continue;

			if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1) {
				nd->link_up = 1;
				goto report;
			}
		}
	}

report:
	nd->handler(nd);
}
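/* Timer handler that periodically polls the channel with Get Link
 * Status (GLS) commands. If no response arrives for several periods,
 * the channel is marked inactive and queued for reconfiguration.
 */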
static void ncsi_channel_monitor(unsigned long data)
{
	struct ncsi_channel *nc = (struct ncsi_channel *)data;
	struct ncsi_package *np = nc->package;
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_cmd_arg nca;
	bool enabled;
	unsigned int timeout;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&nc->lock, flags);
	timeout = nc->timeout;
	enabled = nc->enabled;
	spin_unlock_irqrestore(&nc->lock, flags);

	if (!enabled || !list_empty(&nc->link))
		return;
	if (nc->state != NCSI_CHANNEL_INACTIVE &&
	    nc->state != NCSI_CHANNEL_ACTIVE)
		return;

	if (!(timeout % 2)) {
		nca.ndp = ndp;
		nca.package = np->id;
		nca.channel = nc->id;
		nca.type = NCSI_PKT_CMD_GLS;
		nca.driven = false;
		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			netdev_err(ndp->ndev.dev, "Error %d sending GLS\n",
				   ret);
			return;
		}
	}

	if (timeout + 1 >= 3) {
		if (!(ndp->flags & NCSI_DEV_HWA) &&
		    nc->state == NCSI_CHANNEL_ACTIVE)
			ncsi_report_link(ndp, true);

		spin_lock_irqsave(&ndp->lock, flags);
		xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
		list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		spin_unlock_irqrestore(&ndp->lock, flags);
		ncsi_process_next_channel(ndp);
		return;
	}

	spin_lock_irqsave(&nc->lock, flags);
	nc->timeout = timeout + 1;
	nc->enabled = true;
	spin_unlock_irqrestore(&nc->lock, flags);
	mod_timer(&nc->timer, jiffies + HZ * (1 << (nc->timeout / 2)));
}
void ncsi_start_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	WARN_ON_ONCE(nc->enabled);
	nc->timeout = 0;
	nc->enabled = true;
	spin_unlock_irqrestore(&nc->lock, flags);

	mod_timer(&nc->timer, jiffies + HZ * (1 << (nc->timeout / 2)));
}
void ncsi_stop_channel_monitor(struct ncsi_channel *nc)
{
	unsigned long flags;

	spin_lock_irqsave(&nc->lock, flags);
	if (!nc->enabled) {
		spin_unlock_irqrestore(&nc->lock, flags);
		return;
	}
	nc->enabled = false;
	spin_unlock_irqrestore(&nc->lock, flags);

	del_timer_sync(&nc->timer);
}
struct ncsi_channel *ncsi_find_channel(struct ncsi_package *np,
				       unsigned char id)
{
	struct ncsi_channel *nc;

	NCSI_FOR_EACH_CHANNEL(np, nc) {
		if (nc->id == id)
			return nc;
	}

	return NULL;
}
struct ncsi_channel *ncsi_add_channel(struct ncsi_package *np, unsigned char id)
{
	struct ncsi_channel *nc, *tmp;
	int index;
	unsigned long flags;

	nc = kzalloc(sizeof(*nc), GFP_ATOMIC);
	if (!nc)
		return NULL;

	nc->id = id;
	nc->package = np;
	nc->state = NCSI_CHANNEL_INACTIVE;
	nc->enabled = false;
	setup_timer(&nc->timer, ncsi_channel_monitor, (unsigned long)nc);
	spin_lock_init(&nc->lock);
	INIT_LIST_HEAD(&nc->link);
	for (index = 0; index < NCSI_CAP_MAX; index++)
		nc->caps[index].index = index;
	for (index = 0; index < NCSI_MODE_MAX; index++)
		nc->modes[index].index = index;

	spin_lock_irqsave(&np->lock, flags);
	tmp = ncsi_find_channel(np, id);
	if (tmp) {
		spin_unlock_irqrestore(&np->lock, flags);
		kfree(nc);
		return tmp;
	}

	list_add_tail_rcu(&nc->node, &np->channels);
	np->channel_num++;
	spin_unlock_irqrestore(&np->lock, flags);

	return nc;
}
static void ncsi_remove_channel(struct ncsi_channel *nc)
{
	struct ncsi_package *np = nc->package;
	struct ncsi_channel_filter *ncf;
	unsigned long flags;
	int i;

	/* Release filters */
	spin_lock_irqsave(&nc->lock, flags);
	for (i = 0; i < NCSI_FILTER_MAX; i++) {
		ncf = nc->filters[i];
		if (!ncf)
			continue;

		nc->filters[i] = NULL;
		kfree(ncf);
	}

	nc->state = NCSI_CHANNEL_INACTIVE;
	spin_unlock_irqrestore(&nc->lock, flags);
	ncsi_stop_channel_monitor(nc);

	/* Remove and free channel */
	spin_lock_irqsave(&np->lock, flags);
	list_del_rcu(&nc->node);
	np->channel_num--;
	spin_unlock_irqrestore(&np->lock, flags);

	kfree(nc);
}
struct ncsi_package *ncsi_find_package(struct ncsi_dev_priv *ndp,
				       unsigned char id)
{
	struct ncsi_package *np;

	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		if (np->id == id)
			return np;
	}

	return NULL;
}
struct ncsi_package *ncsi_add_package(struct ncsi_dev_priv *ndp,
				      unsigned char id)
{
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	np = kzalloc(sizeof(*np), GFP_ATOMIC);
	if (!np)
		return NULL;

	np->id = id;
	np->ndp = ndp;
	spin_lock_init(&np->lock);
	INIT_LIST_HEAD(&np->channels);

	spin_lock_irqsave(&ndp->lock, flags);
	tmp = ncsi_find_package(ndp, id);
	if (tmp) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		kfree(np);
		return tmp;
	}

	list_add_tail_rcu(&np->node, &ndp->packages);
	ndp->package_num++;
	spin_unlock_irqrestore(&ndp->lock, flags);

	return np;
}
void ncsi_remove_package(struct ncsi_package *np)
{
	struct ncsi_dev_priv *ndp = np->ndp;
	struct ncsi_channel *nc, *tmp;
	unsigned long flags;

	/* Release all child channels */
	list_for_each_entry_safe(nc, tmp, &np->channels, node)
		ncsi_remove_channel(nc);

	/* Remove and free package */
	spin_lock_irqsave(&ndp->lock, flags);
	list_del_rcu(&np->node);
	ndp->package_num--;
	spin_unlock_irqrestore(&ndp->lock, flags);

	kfree(np);
}
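/* Resolve a combined NCSI (package, channel) identifier into the
 * corresponding package and channel objects; either output may be
 * NULL if no match is found.
 */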
void ncsi_find_package_and_channel(struct ncsi_dev_priv *ndp,
				   unsigned char id,
				   struct ncsi_package **np,
				   struct ncsi_channel **nc)
{
	struct ncsi_package *p;
	struct ncsi_channel *c;

	p = ncsi_find_package(ndp, NCSI_PACKAGE_INDEX(id));
	c = p ? ncsi_find_channel(p, NCSI_CHANNEL_INDEX(id)) : NULL;

	if (np)
		*np = p;
	if (nc)
		*nc = c;
}
/* For two consecutive NCSI commands, the packet IDs shouldn't be the
 * same. Otherwise, a stale response might be matched to the wrong
 * command. So the available IDs are allocated in round-robin fashion.
 */
struct ncsi_request *ncsi_alloc_request(struct ncsi_dev_priv *ndp, bool driven)
{
	struct ncsi_request *nr = NULL;
	int i, limit = ARRAY_SIZE(ndp->requests);
	unsigned long flags;

	/* Check if there is one available request until the ceiling */
	spin_lock_irqsave(&ndp->lock, flags);
	for (i = ndp->request_id; !nr && i < limit; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->driven = driven;
		if (++ndp->request_id >= limit)
			ndp->request_id = 0;
	}

	/* Fall back to checking from the starting cursor */
	for (i = 0; !nr && i < ndp->request_id; i++) {
		if (ndp->requests[i].used)
			continue;

		nr = &ndp->requests[i];
		nr->used = true;
		nr->driven = driven;
		if (++ndp->request_id >= limit)
			ndp->request_id = 0;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	return nr;
}
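/* Return a request slot to the free pool and drop its command and
 * response skbs. For driven requests, the last completion kicks the
 * state-machine workqueue.
 */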
void ncsi_free_request(struct ncsi_request *nr)
{
	struct ncsi_dev_priv *ndp = nr->ndp;
	struct sk_buff *cmd, *rsp;
	unsigned long flags;
	bool driven;

	if (nr->enabled) {
		nr->enabled = false;
		del_timer_sync(&nr->timer);
	}

	spin_lock_irqsave(&ndp->lock, flags);
	cmd = nr->cmd;
	rsp = nr->rsp;
	nr->cmd = NULL;
	nr->rsp = NULL;
	nr->used = false;
	driven = nr->driven;
	spin_unlock_irqrestore(&ndp->lock, flags);

	if (driven && cmd && --ndp->pending_req_num == 0)
		schedule_work(&ndp->work);

	/* Release command and response */
	consume_skb(cmd);
	consume_skb(rsp);
}
struct ncsi_dev *ncsi_find_dev(struct net_device *dev)
{
	struct ncsi_dev_priv *ndp;

	NCSI_FOR_EACH_DEV(ndp) {
		if (ndp->ndev.dev == dev)
			return &ndp->ndev;
	}

	return NULL;
}
static void ncsi_request_timeout(unsigned long data)
{
	struct ncsi_request *nr = (struct ncsi_request *)data;
	struct ncsi_dev_priv *ndp = nr->ndp;
	unsigned long flags;

	/* If the request already had an associated response,
	 * let the response handler release it.
	 */
	spin_lock_irqsave(&ndp->lock, flags);
	nr->enabled = false;
	if (nr->rsp || !nr->cmd) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* Release the request */
	ncsi_free_request(nr);
}
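/* State machine that tears down the active channel: select its
 * package, disable the channel's network TX, disable the channel
 * itself, then deselect the package. Each step sends one NCSI command
 * and the response handler advances the state.
 */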
static void ncsi_suspend_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_cmd_arg nca;
	int ret;

	nca.ndp = ndp;
	nca.driven = false;
	switch (nd->state) {
	case ncsi_dev_state_suspend:
		nd->state = ncsi_dev_state_suspend_select;
	case ncsi_dev_state_suspend_select:
	case ncsi_dev_state_suspend_dcnt:
	case ncsi_dev_state_suspend_dc:
	case ncsi_dev_state_suspend_deselect:
		ndp->pending_req_num = 1;

		np = ndp->active_package;
		nc = ndp->active_channel;
		nca.package = np->id;
		if (nd->state == ncsi_dev_state_suspend_select) {
			nca.type = NCSI_PKT_CMD_SP;
			nca.channel = 0x1f;
			if (ndp->flags & NCSI_DEV_HWA)
				nca.bytes[0] = 0;
			else
				nca.bytes[0] = 1;
			nd->state = ncsi_dev_state_suspend_dcnt;
		} else if (nd->state == ncsi_dev_state_suspend_dcnt) {
			nca.type = NCSI_PKT_CMD_DCNT;
			nca.channel = nc->id;
			nd->state = ncsi_dev_state_suspend_dc;
		} else if (nd->state == ncsi_dev_state_suspend_dc) {
			nca.type = NCSI_PKT_CMD_DC;
			nca.channel = nc->id;
			nca.bytes[0] = 1;
			nd->state = ncsi_dev_state_suspend_deselect;
		} else if (nd->state == ncsi_dev_state_suspend_deselect) {
			nca.type = NCSI_PKT_CMD_DP;
			nca.channel = 0x1f;
			nd->state = ncsi_dev_state_suspend_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret) {
			nd->state = ncsi_dev_state_functional;
			return;
		}

		break;
	case ncsi_dev_state_suspend_done:
		xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
		ncsi_process_next_channel(ndp);

		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in suspend\n",
			    nd->state);
	}
}
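/* State machine that brings up the chosen channel: select the package,
 * clear the channel's initial state, program the MAC and broadcast
 * (and, with IPv6, multicast) filters, enable the channel and AEN
 * delivery, and finally query the link status.
 */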
static void ncsi_configure_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct net_device *dev = nd->dev;
	struct ncsi_package *np = ndp->active_package;
	struct ncsi_channel *nc = ndp->active_channel;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.driven = false;
	switch (nd->state) {
	case ncsi_dev_state_config:
	case ncsi_dev_state_config_sp:
		ndp->pending_req_num = 1;

		/* Select the specific package */
		nca.type = NCSI_PKT_CMD_SP;
		if (ndp->flags & NCSI_DEV_HWA)
			nca.bytes[0] = 0;
		else
			nca.bytes[0] = 1;
		nca.package = np->id;
		nca.channel = 0x1f;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_config_cis;
		break;
	case ncsi_dev_state_config_cis:
		ndp->pending_req_num = 1;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = np->id;
		nca.channel = nc->id;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_config_sma;
		break;
	case ncsi_dev_state_config_sma:
	case ncsi_dev_state_config_ebf:
#if IS_ENABLED(CONFIG_IPV6)
	case ncsi_dev_state_config_egmf:
#endif
	case ncsi_dev_state_config_ecnt:
	case ncsi_dev_state_config_ec:
	case ncsi_dev_state_config_ae:
	case ncsi_dev_state_config_gls:
		ndp->pending_req_num = 1;

		nca.package = np->id;
		nca.channel = nc->id;

		/* Use the first entry in the unicast filter table. Note
		 * that the MAC filter table starts from entry 1 instead
		 * of entry 0.
		 */
		if (nd->state == ncsi_dev_state_config_sma) {
			nca.type = NCSI_PKT_CMD_SMA;
			for (index = 0; index < 6; index++)
				nca.bytes[index] = dev->dev_addr[index];
			nca.bytes[index] = 0x1;
			nca.bytes[index + 1] = 0x1;
			nd->state = ncsi_dev_state_config_ebf;
		} else if (nd->state == ncsi_dev_state_config_ebf) {
			nca.type = NCSI_PKT_CMD_EBF;
			nca.dwords[0] = nc->caps[NCSI_CAP_BC].cap;
			nd->state = ncsi_dev_state_config_ecnt;
#if IS_ENABLED(CONFIG_IPV6)
			if (ndp->inet6_addr_num > 0 &&
			    (nc->caps[NCSI_CAP_GENERIC].cap &
			     NCSI_CAP_GENERIC_MC))
				nd->state = ncsi_dev_state_config_egmf;
			else
				nd->state = ncsi_dev_state_config_ecnt;
		} else if (nd->state == ncsi_dev_state_config_egmf) {
			nca.type = NCSI_PKT_CMD_EGMF;
			nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
			nd->state = ncsi_dev_state_config_ecnt;
#endif /* CONFIG_IPV6 */
		} else if (nd->state == ncsi_dev_state_config_ecnt) {
			nca.type = NCSI_PKT_CMD_ECNT;
			nd->state = ncsi_dev_state_config_ec;
		} else if (nd->state == ncsi_dev_state_config_ec) {
			/* Enable AEN if it's supported */
			nca.type = NCSI_PKT_CMD_EC;
			nd->state = ncsi_dev_state_config_ae;
			if (!(nc->caps[NCSI_CAP_AEN].cap & NCSI_CAP_AEN_MASK))
				nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_ae) {
			nca.type = NCSI_PKT_CMD_AE;
			nca.bytes[0] = 0;
			nca.dwords[1] = nc->caps[NCSI_CAP_AEN].cap;
			nd->state = ncsi_dev_state_config_gls;
		} else if (nd->state == ncsi_dev_state_config_gls) {
			nca.type = NCSI_PKT_CMD_GLS;
			nd->state = ncsi_dev_state_config_done;
		}

		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;
		break;
	case ncsi_dev_state_config_done:
		if (nc->modes[NCSI_MODE_LINK].data[2] & 0x1)
			xchg(&nc->state, NCSI_CHANNEL_ACTIVE);
		else
			xchg(&nc->state, NCSI_CHANNEL_INACTIVE);

		ncsi_start_channel_monitor(nc);
		ncsi_process_next_channel(ndp);
		break;
	default:
		netdev_warn(dev, "Wrong NCSI state 0x%x in config\n",
			    nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}
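/* Pick an inactive channel, preferring one that already reports link
 * up, and queue it for configuration.
 */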
static int ncsi_choose_active_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc, *found;
	struct ncsi_channel_mode *ncm;
	unsigned long flags;

	/* The search is done once an inactive channel with up
	 * link is found.
	 */
	found = NULL;
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			if (!list_empty(&nc->link) ||
			    nc->state != NCSI_CHANNEL_INACTIVE)
				continue;

			if (!found)
				found = nc;

			ncm = &nc->modes[NCSI_MODE_LINK];
			if (ncm->data[2] & 0x1) {
				found = nc;
				goto out;
			}
		}
	}

	if (!found) {
		ncsi_report_link(ndp, true);
		return -ENODEV;
	}

out:
	spin_lock_irqsave(&ndp->lock, flags);
	list_add_tail_rcu(&found->link, &ndp->channel_queue);
	spin_unlock_irqrestore(&ndp->lock, flags);

	return ncsi_process_next_channel(ndp);
}
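/* Check whether every known channel advertises hardware arbitration
 * (HWA) support and update the device flags accordingly.
 */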
static bool ncsi_check_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned int cap;

	/* Hardware arbitration is disabled if any channel doesn't
	 * explicitly support it.
	 */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			cap = nc->caps[NCSI_CAP_GENERIC].cap;
			if (!(cap & NCSI_CAP_GENERIC_HWA) ||
			    (cap & NCSI_CAP_GENERIC_HWA_MASK) !=
			    NCSI_CAP_GENERIC_HWA_SUPPORT) {
				ndp->flags &= ~NCSI_DEV_HWA;
				return false;
			}
		}
	}

	ndp->flags |= NCSI_DEV_HWA;
	return true;
}
static int ncsi_enable_hwa(struct ncsi_dev_priv *ndp)
{
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	unsigned long flags;

	/* Move all available channels to the processing queue */
	spin_lock_irqsave(&ndp->lock, flags);
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			WARN_ON_ONCE(nc->state != NCSI_CHANNEL_INACTIVE ||
				     !list_empty(&nc->link));
			ncsi_stop_channel_monitor(nc);
			list_add_tail_rcu(&nc->link, &ndp->channel_queue);
		}
	}
	spin_unlock_irqrestore(&ndp->lock, flags);

	/* In the extreme case we may have no channels at all */
	if (list_empty(&ndp->channel_queue)) {
		ncsi_report_link(ndp, false);
		return -ENOENT;
	}

	return ncsi_process_next_channel(ndp);
}
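/* Enumeration state machine: deselect and select every possible
 * package, clear the initial state of every possible channel, then
 * retrieve version, capabilities and link status for each channel
 * that answered. It runs once, when the interface is first started.
 */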
static void ncsi_probe_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_dev *nd = &ndp->ndev;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	unsigned char index;
	int ret;

	nca.ndp = ndp;
	nca.driven = false;
	switch (nd->state) {
	case ncsi_dev_state_probe:
		nd->state = ncsi_dev_state_probe_deselect;
	case ncsi_dev_state_probe_deselect:
		ndp->pending_req_num = 8;

		/* Deselect all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		nca.channel = 0x1f;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_package;
		break;
	case ncsi_dev_state_probe_package:
		ndp->pending_req_num = 16;

		/* Select all possible packages */
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.channel = 0x1f;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		/* Disable all possible packages */
		nca.type = NCSI_PKT_CMD_DP;
		for (index = 0; index < 8; index++) {
			nca.package = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_channel;
		break;
	case ncsi_dev_state_probe_channel:
		if (!ndp->active_package)
			ndp->active_package = list_first_or_null_rcu(
				&ndp->packages, struct ncsi_package, node);
		else if (list_is_last(&ndp->active_package->node,
				      &ndp->packages))
			ndp->active_package = NULL;
		else
			ndp->active_package = list_next_entry(
				ndp->active_package, node);

		/* All available packages and channels are enumerated. The
		 * enumeration happens only once, when the NCSI interface is
		 * started, so we need to continue bringing up the interface
		 * after it completes.
		 *
		 * We have to choose an active channel before configuring it.
		 * Note that we possibly don't have an active channel in
		 * extreme situations.
		 */
		if (!ndp->active_package) {
			ndp->flags |= NCSI_DEV_PROBED;
			if (ncsi_check_hwa(ndp))
				ncsi_enable_hwa(ndp);
			else
				ncsi_choose_active_channel(ndp);
			return;
		}

		/* Select the active package */
		ndp->pending_req_num = 1;
		nca.type = NCSI_PKT_CMD_SP;
		nca.bytes[0] = 1;
		nca.package = ndp->active_package->id;
		nca.channel = 0x1f;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		nd->state = ncsi_dev_state_probe_cis;
		break;
	case ncsi_dev_state_probe_cis:
		ndp->pending_req_num = 32;

		/* Clear initial state */
		nca.type = NCSI_PKT_CMD_CIS;
		nca.package = ndp->active_package->id;
		for (index = 0; index < 0x20; index++) {
			nca.channel = index;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		nd->state = ncsi_dev_state_probe_gvi;
		break;
	case ncsi_dev_state_probe_gvi:
	case ncsi_dev_state_probe_gc:
	case ncsi_dev_state_probe_gls:
		np = ndp->active_package;
		ndp->pending_req_num = np->channel_num;

		/* Retrieve version, capability or link status */
		if (nd->state == ncsi_dev_state_probe_gvi)
			nca.type = NCSI_PKT_CMD_GVI;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nca.type = NCSI_PKT_CMD_GC;
		else
			nca.type = NCSI_PKT_CMD_GLS;

		nca.package = np->id;
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			nca.channel = nc->id;
			ret = ncsi_xmit_cmd(&nca);
			if (ret)
				goto error;
		}

		if (nd->state == ncsi_dev_state_probe_gvi)
			nd->state = ncsi_dev_state_probe_gc;
		else if (nd->state == ncsi_dev_state_probe_gc)
			nd->state = ncsi_dev_state_probe_gls;
		else
			nd->state = ncsi_dev_state_probe_dp;
		break;
	case ncsi_dev_state_probe_dp:
		ndp->pending_req_num = 1;

		/* Deselect the active package */
		nca.type = NCSI_PKT_CMD_DP;
		nca.package = ndp->active_package->id;
		nca.channel = 0x1f;
		ret = ncsi_xmit_cmd(&nca);
		if (ret)
			goto error;

		/* Scan channels in next package */
		nd->state = ncsi_dev_state_probe_channel;
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in enumeration\n",
			    nd->state);
	}

	return;

error:
	ncsi_report_link(ndp, true);
}
static void ncsi_dev_work(struct work_struct *work)
{
	struct ncsi_dev_priv *ndp = container_of(work,
			struct ncsi_dev_priv, work);
	struct ncsi_dev *nd = &ndp->ndev;

	switch (nd->state & ncsi_dev_state_major) {
	case ncsi_dev_state_probe:
		ncsi_probe_channel(ndp);
		break;
	case ncsi_dev_state_suspend:
		ncsi_suspend_channel(ndp);
		break;
	case ncsi_dev_state_config:
		ncsi_configure_channel(ndp);
		break;
	default:
		netdev_warn(nd->dev, "Wrong NCSI state 0x%x in workqueue\n",
			    nd->state);
	}
}
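/* Pop the next channel from the processing queue and either configure
 * it (if it was inactive) or suspend it (if it was active). With an
 * empty queue, reshuffle the channels or report the link as down.
 */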
int ncsi_process_next_channel(struct ncsi_dev_priv *ndp)
{
	struct ncsi_channel *nc;
	int old_state;
	unsigned long flags;

	spin_lock_irqsave(&ndp->lock, flags);
	nc = list_first_or_null_rcu(&ndp->channel_queue,
				    struct ncsi_channel, link);
	if (!nc) {
		spin_unlock_irqrestore(&ndp->lock, flags);
		goto out;
	}

	old_state = xchg(&nc->state, NCSI_CHANNEL_INVISIBLE);
	list_del_init(&nc->link);
	spin_unlock_irqrestore(&ndp->lock, flags);

	ndp->active_channel = nc;
	ndp->active_package = nc->package;

	switch (old_state) {
	case NCSI_CHANNEL_INACTIVE:
		ndp->ndev.state = ncsi_dev_state_config;
		ncsi_configure_channel(ndp);
		break;
	case NCSI_CHANNEL_ACTIVE:
		ndp->ndev.state = ncsi_dev_state_suspend;
		ncsi_suspend_channel(ndp);
		break;
	default:
		netdev_err(ndp->ndev.dev, "Invalid state 0x%x on %d:%d\n",
			   nc->state, nc->package->id, nc->id);
		ncsi_report_link(ndp, false);
		return -EINVAL;
	}

	return 0;

out:
	ndp->active_channel = NULL;
	ndp->active_package = NULL;
	if (ndp->flags & NCSI_DEV_RESHUFFLE) {
		ndp->flags &= ~NCSI_DEV_RESHUFFLE;
		return ncsi_choose_active_channel(ndp);
	}

	ncsi_report_link(ndp, false);
	return -ENODEV;
}
#if IS_ENABLED(CONFIG_IPV6)
static int ncsi_inet6addr_event(struct notifier_block *this,
				unsigned long event, void *data)
{
	struct inet6_ifaddr *ifa = data;
	struct net_device *dev = ifa->idev->dev;
	struct ncsi_dev *nd = ncsi_find_dev(dev);
	struct ncsi_dev_priv *ndp = nd ? TO_NCSI_DEV_PRIV(nd) : NULL;
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	struct ncsi_cmd_arg nca;
	bool action;
	int ret;

	if (!ndp || (ipv6_addr_type(&ifa->addr) &
	    (IPV6_ADDR_LINKLOCAL | IPV6_ADDR_LOOPBACK)))
		return NOTIFY_OK;

	switch (event) {
	case NETDEV_UP:
		action = (++ndp->inet6_addr_num) == 1;
		nca.type = NCSI_PKT_CMD_EGMF;
		break;
	case NETDEV_DOWN:
		action = (--ndp->inet6_addr_num == 0);
		nca.type = NCSI_PKT_CMD_DGMF;
		break;
	default:
		return NOTIFY_OK;
	}

	/* We might not have an active package or channel yet. The
	 * required IPv6 multicast filter will be enabled when an
	 * active channel is chosen.
	 */
	np = ndp->active_package;
	nc = ndp->active_channel;
	if (!action || !np || !nc)
		return NOTIFY_OK;

	/* We needn't enable or disable it if the function isn't supported */
	if (!(nc->caps[NCSI_CAP_GENERIC].cap & NCSI_CAP_GENERIC_MC))
		return NOTIFY_OK;

	nca.ndp = ndp;
	nca.driven = false;
	nca.package = np->id;
	nca.channel = nc->id;
	nca.dwords[0] = nc->caps[NCSI_CAP_MC].cap;
	ret = ncsi_xmit_cmd(&nca);
	if (ret) {
		netdev_warn(dev, "Failed to %s global multicast filter (%d)\n",
			    (event == NETDEV_UP) ? "enable" : "disable", ret);
		return NOTIFY_DONE;
	}

	return NOTIFY_OK;
}

static struct notifier_block ncsi_inet6addr_notifier = {
	.notifier_call = ncsi_inet6addr_event,
};
#endif /* CONFIG_IPV6 */
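/* A minimal usage sketch from a consumer driver's point of view; the
 * handler name below is illustrative, not an existing in-tree user:
 *
 *	static void foo_ncsi_handler(struct ncsi_dev *nd)
 *	{
 *		if (nd->state == ncsi_dev_state_functional && nd->link_up)
 *			netif_carrier_on(nd->dev);
 *		else
 *			netif_carrier_off(nd->dev);
 *	}
 *
 *	nd = ncsi_register_dev(netdev, foo_ncsi_handler);
 *	ncsi_start_dev(nd);		-- typically from ndo_open
 *	ncsi_unregister_dev(nd);	-- on driver removal
 */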
struct ncsi_dev *ncsi_register_dev(struct net_device *dev,
				   void (*handler)(struct ncsi_dev *ndev))
{
	struct ncsi_dev_priv *ndp;
	struct ncsi_dev *nd;
	unsigned long flags;
	int i;

	/* Check if the device has been registered or not */
	nd = ncsi_find_dev(dev);
	if (nd)
		return nd;

	/* Create NCSI device */
	ndp = kzalloc(sizeof(*ndp), GFP_ATOMIC);
	if (!ndp)
		return NULL;

	nd = &ndp->ndev;
	nd->state = ncsi_dev_state_registered;
	nd->dev = dev;
	nd->handler = handler;
	ndp->pending_req_num = 0;
	INIT_LIST_HEAD(&ndp->channel_queue);
	INIT_WORK(&ndp->work, ncsi_dev_work);

	/* Initialize private NCSI device */
	spin_lock_init(&ndp->lock);
	INIT_LIST_HEAD(&ndp->packages);
	ndp->request_id = 0;
	for (i = 0; i < ARRAY_SIZE(ndp->requests); i++) {
		ndp->requests[i].id = i;
		ndp->requests[i].ndp = ndp;
		setup_timer(&ndp->requests[i].timer,
			    ncsi_request_timeout,
			    (unsigned long)&ndp->requests[i]);
	}

	spin_lock_irqsave(&ncsi_dev_lock, flags);
#if IS_ENABLED(CONFIG_IPV6)
	ndp->inet6_addr_num = 0;
	if (list_empty(&ncsi_dev_list))
		register_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	list_add_tail_rcu(&ndp->node, &ncsi_dev_list);
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	/* Register NCSI packet Rx handler */
	ndp->ptype.type = cpu_to_be16(ETH_P_NCSI);
	ndp->ptype.func = ncsi_rcv_rsp;
	ndp->ptype.dev = dev;
	dev_add_pack(&ndp->ptype);

	return nd;
}
EXPORT_SYMBOL_GPL(ncsi_register_dev);
int ncsi_start_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np;
	struct ncsi_channel *nc;
	int old_state, ret;

	if (nd->state != ncsi_dev_state_registered &&
	    nd->state != ncsi_dev_state_functional)
		return -ENOTTY;

	if (!(ndp->flags & NCSI_DEV_PROBED)) {
		nd->state = ncsi_dev_state_probe;
		schedule_work(&ndp->work);
		return 0;
	}

	/* Reset the channels' states and start over */
	NCSI_FOR_EACH_PACKAGE(ndp, np) {
		NCSI_FOR_EACH_CHANNEL(np, nc) {
			old_state = xchg(&nc->state, NCSI_CHANNEL_INACTIVE);
			WARN_ON_ONCE(!list_empty(&nc->link) ||
				     old_state == NCSI_CHANNEL_INVISIBLE);
		}
	}

	if (ndp->flags & NCSI_DEV_HWA)
		ret = ncsi_enable_hwa(ndp);
	else
		ret = ncsi_choose_active_channel(ndp);

	return ret;
}
EXPORT_SYMBOL_GPL(ncsi_start_dev);
void ncsi_unregister_dev(struct ncsi_dev *nd)
{
	struct ncsi_dev_priv *ndp = TO_NCSI_DEV_PRIV(nd);
	struct ncsi_package *np, *tmp;
	unsigned long flags;

	dev_remove_pack(&ndp->ptype);

	list_for_each_entry_safe(np, tmp, &ndp->packages, node)
		ncsi_remove_package(np);

	spin_lock_irqsave(&ncsi_dev_lock, flags);
	list_del_rcu(&ndp->node);
#if IS_ENABLED(CONFIG_IPV6)
	if (list_empty(&ncsi_dev_list))
		unregister_inet6addr_notifier(&ncsi_inet6addr_notifier);
#endif
	spin_unlock_irqrestore(&ncsi_dev_lock, flags);

	kfree(ndp);
}
EXPORT_SYMBOL_GPL(ncsi_unregister_dev);