/*
 * net/tipc/name_table.c: TIPC name table code
 *
 * Copyright (c) 2000-2006, 2014-2015, Ericsson AB
 * Copyright (c) 2004-2008, 2010-2014, Wind River Systems
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the names of the copyright holders nor the names of its
 *    contributors may be used to endorse or promote products derived from
 *    this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

#include <net/sock.h>
#include "core.h"
#include "netlink.h"
#include "name_table.h"
#include "name_distr.h"
#include "subscr.h"
#include "bcast.h"
#include "addr.h"
#include "node.h"
#include <net/genetlink.h>

#define TIPC_NAMETBL_SIZE 1024	/* must be a power of 2 */

/**
 * struct name_info - name sequence publication info
 * @node_list: circular list of publications made by own node
 * @cluster_list: circular list of publications made by own cluster
 * @zone_list: circular list of publications made by own zone
 * @node_list_size: number of entries in "node_list"
 * @cluster_list_size: number of entries in "cluster_list"
 * @zone_list_size: number of entries in "zone_list"
 *
 * Note: The zone list always contains at least one entry, since all
 *       publications of the associated name sequence belong to it.
 *       (The cluster and node lists may be empty.)
 */
struct name_info {
	struct list_head node_list;
	struct list_head cluster_list;
	struct list_head zone_list;
	u32 node_list_size;
	u32 cluster_list_size;
	u32 zone_list_size;
};

/**
 * struct sub_seq - container for all published instances of a name sequence
 * @lower: name sequence lower bound
 * @upper: name sequence upper bound
 * @info: pointer to name sequence publication info
 */
struct sub_seq {
	u32 lower;
	u32 upper;
	struct name_info *info;
};

/**
 * struct name_seq - container for all published instances of a name type
 * @type: 32 bit 'type' value for name sequence
 * @sseqs: pointer to dynamically-sized array of sub-sequences of this 'type';
 *         sub-sequences are sorted in ascending order
 * @alloc: number of sub-sequences currently in array
 * @first_free: array index of first unused sub-sequence entry
 * @ns_list: links to adjacent name sequences in hash chain
 * @subscriptions: list of subscriptions for this 'type'
 * @lock: spinlock controlling access to publication lists of all sub-sequences
 * @rcu: RCU callback head used for deferred freeing
 */
struct name_seq {
	u32 type;
	struct sub_seq *sseqs;
	u32 alloc;
	u32 first_free;
	struct hlist_node ns_list;
	struct list_head subscriptions;
	spinlock_t lock;
	struct rcu_head rcu;
};

static int hash(int x)
{
	return x & (TIPC_NAMETBL_SIZE - 1);
}

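/*
 * Illustrative note (added here, not part of the original file): because
 * TIPC_NAMETBL_SIZE is a power of two, masking with (TIPC_NAMETBL_SIZE - 1)
 * is equivalent to taking the value modulo the table size, e.g.:
 *
 *	hash(42)   == 42   & 1023 == 42
 *	hash(4242) == 4242 & 1023 == 146
 *
 * so every 'type' value selects exactly one of the 1024 seq_hlist chains.
 */
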
/**
 * publ_create - create a publication structure
 */
static struct publication *publ_create(u32 type, u32 lower, u32 upper,
				       u32 scope, u32 node, u32 port_ref,
				       u32 key)
{
	struct publication *publ = kzalloc(sizeof(*publ), GFP_ATOMIC);

	if (publ == NULL) {
		pr_warn("Publication creation failure, no memory\n");
		return NULL;
	}

	publ->type = type;
	publ->lower = lower;
	publ->upper = upper;
	publ->scope = scope;
	publ->node = node;
	publ->ref = port_ref;
	publ->key = key;
	INIT_LIST_HEAD(&publ->pport_list);
	return publ;
}

/**
 * tipc_subseq_alloc - allocate a specified number of sub-sequence structures
 */
static struct sub_seq *tipc_subseq_alloc(u32 cnt)
{
	return kcalloc(cnt, sizeof(struct sub_seq), GFP_ATOMIC);
}

/**
 * tipc_nameseq_create - create a name sequence structure for the specified 'type'
 *
 * Allocates a single sub-sequence structure and sets it to all 0's.
 */
static struct name_seq *tipc_nameseq_create(u32 type, struct hlist_head *seq_head)
{
	struct name_seq *nseq = kzalloc(sizeof(*nseq), GFP_ATOMIC);
	struct sub_seq *sseq = tipc_subseq_alloc(1);

	if (!nseq || !sseq) {
		pr_warn("Name sequence creation failed, no memory\n");
		kfree(nseq);
		kfree(sseq);
		return NULL;
	}

	spin_lock_init(&nseq->lock);
	nseq->type = type;
	nseq->sseqs = sseq;
	nseq->alloc = 1;
	INIT_HLIST_NODE(&nseq->ns_list);
	INIT_LIST_HEAD(&nseq->subscriptions);
	hlist_add_head_rcu(&nseq->ns_list, seq_head);
	return nseq;
}

/**
 * nameseq_find_subseq - find sub-sequence (if any) matching a name instance
 *
 * Very time-critical, so binary searches through sub-sequence array.
 */
static struct sub_seq *nameseq_find_subseq(struct name_seq *nseq,
					   u32 instance)
{
	struct sub_seq *sseqs = nseq->sseqs;
	int low = 0;
	int high = nseq->first_free - 1;
	int mid;

	while (low <= high) {
		mid = (low + high) / 2;
		if (instance < sseqs[mid].lower)
			high = mid - 1;
		else if (instance > sseqs[mid].upper)
			low = mid + 1;
		else
			return &sseqs[mid];
	}
	return NULL;
}

/**
 * nameseq_locate_subseq - determine position of name instance in sub-sequence
 *
 * Returns index in sub-sequence array of the entry that contains the specified
 * instance value; if no entry contains that value, returns the position
 * where a new entry for it would be inserted in the array.
 *
 * Note: Similar to binary search code for locating a sub-sequence.
 */
static u32 nameseq_locate_subseq(struct name_seq *nseq, u32 instance)
{
	struct sub_seq *sseqs = nseq->sseqs;
	int low = 0;
	int high = nseq->first_free - 1;
	int mid;

	while (low <= high) {
		mid = (low + high) / 2;
		if (instance < sseqs[mid].lower)
			high = mid - 1;
		else if (instance > sseqs[mid].upper)
			low = mid + 1;
		else
			return mid;
	}
	return low;
}

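/*
 * Illustrative example (added here, not part of the original file): for a
 * name_seq whose sub-sequences cover [100-199] and [300-399],
 * nameseq_locate_subseq() behaves as follows:
 *
 *	nameseq_locate_subseq(nseq, 150) == 0	(inside the first entry)
 *	nameseq_locate_subseq(nseq, 350) == 1	(inside the second entry)
 *	nameseq_locate_subseq(nseq, 250) == 1	(no match; insertion index)
 *	nameseq_locate_subseq(nseq, 450) == 2	(no match; would be appended)
 *
 * tipc_nameseq_insert_publ() below uses this index to place a new
 * sub-sequence, and the multicast lookups use it as a scan starting point.
 */
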
/**
 * tipc_nameseq_insert_publ - insert a publication into a name sequence
 */
static struct publication *tipc_nameseq_insert_publ(struct net *net,
						    struct name_seq *nseq,
						    u32 type, u32 lower,
						    u32 upper, u32 scope,
						    u32 node, u32 port, u32 key)
{
	struct tipc_subscription *s;
	struct tipc_subscription *st;
	struct publication *publ;
	struct sub_seq *sseq;
	struct name_info *info;
	int created_subseq = 0;

	sseq = nameseq_find_subseq(nseq, lower);
	if (sseq) {

		/* Lower end overlaps existing entry => need an exact match */
		if ((sseq->lower != lower) || (sseq->upper != upper))
			return NULL;

		info = sseq->info;

		/* Check if an identical publication already exists */
		list_for_each_entry(publ, &info->zone_list, zone_list) {
			if ((publ->ref == port) && (publ->key == key) &&
			    (!publ->node || (publ->node == node)))
				return NULL;
		}
	} else {
		u32 inspos;
		struct sub_seq *freesseq;

		/* Find where lower end should be inserted */
		inspos = nameseq_locate_subseq(nseq, lower);

		/* Fail if upper end overlaps into an existing entry */
		if ((inspos < nseq->first_free) &&
		    (upper >= nseq->sseqs[inspos].lower))
			return NULL;

		/* Ensure there is space for new sub-sequence */
		if (nseq->first_free == nseq->alloc) {
			struct sub_seq *sseqs = tipc_subseq_alloc(nseq->alloc * 2);

			if (!sseqs) {
				pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
					type, lower, upper);
				return NULL;
			}
			memcpy(sseqs, nseq->sseqs,
			       nseq->alloc * sizeof(struct sub_seq));
			kfree(nseq->sseqs);
			nseq->sseqs = sseqs;
			nseq->alloc *= 2;
		}

		info = kzalloc(sizeof(*info), GFP_ATOMIC);
		if (!info) {
			pr_warn("Cannot publish {%u,%u,%u}, no memory\n",
				type, lower, upper);
			return NULL;
		}

		INIT_LIST_HEAD(&info->node_list);
		INIT_LIST_HEAD(&info->cluster_list);
		INIT_LIST_HEAD(&info->zone_list);

		/* Insert new sub-sequence */
		sseq = &nseq->sseqs[inspos];
		freesseq = &nseq->sseqs[nseq->first_free];
		memmove(sseq + 1, sseq, (freesseq - sseq) * sizeof(*sseq));
		memset(sseq, 0, sizeof(*sseq));
		nseq->first_free++;
		sseq->lower = lower;
		sseq->upper = upper;
		sseq->info = info;
		created_subseq = 1;
	}

	/* Insert a publication */
	publ = publ_create(type, lower, upper, scope, node, port, key);
	if (!publ)
		return NULL;

	list_add(&publ->zone_list, &info->zone_list);
	info->zone_list_size++;

	if (in_own_cluster(net, node)) {
		list_add(&publ->cluster_list, &info->cluster_list);
		info->cluster_list_size++;
	}

	if (in_own_node(net, node)) {
		list_add(&publ->node_list, &info->node_list);
		info->node_list_size++;
	}

	/* Any subscriptions waiting for notification? */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
					    TIPC_PUBLISHED, publ->ref,
					    publ->node, created_subseq);
	}
	return publ;
}

/**
 * tipc_nameseq_remove_publ - remove a publication from a name sequence
 *
 * NOTE: There may be cases where TIPC is asked to remove a publication
 * that is not in the name table. For example, if another node issues a
 * publication for a name sequence that overlaps an existing name sequence
 * the publication will not be recorded, which means the publication won't
 * be found when the name sequence is later withdrawn by that node.
 * A failed withdraw request simply returns a failure indication and lets the
 * caller issue any error or warning messages associated with such a problem.
 */
static struct publication *tipc_nameseq_remove_publ(struct net *net,
						    struct name_seq *nseq,
						    u32 inst, u32 node,
						    u32 ref, u32 key)
{
	struct publication *publ;
	struct sub_seq *sseq = nameseq_find_subseq(nseq, inst);
	struct name_info *info;
	struct sub_seq *free;
	struct tipc_subscription *s, *st;
	int removed_subseq = 0;

	if (!sseq)
		return NULL;

	info = sseq->info;

	/* Locate publication, if it exists */
	list_for_each_entry(publ, &info->zone_list, zone_list) {
		if ((publ->key == key) && (publ->ref == ref) &&
		    (!publ->node || (publ->node == node)))
			goto found;
	}
	return NULL;

found:
	/* Remove publication from zone scope list */
	list_del(&publ->zone_list);
	info->zone_list_size--;

	/* Remove publication from cluster scope list, if present */
	if (in_own_cluster(net, node)) {
		list_del(&publ->cluster_list);
		info->cluster_list_size--;
	}

	/* Remove publication from node scope list, if present */
	if (in_own_node(net, node)) {
		list_del(&publ->node_list);
		info->node_list_size--;
	}

	/* Contract subseq list if no more publications for that subseq */
	if (list_empty(&info->zone_list)) {
		kfree(info);
		free = &nseq->sseqs[nseq->first_free--];
		memmove(sseq, sseq + 1, (free - (sseq + 1)) * sizeof(*sseq));
		removed_subseq = 1;
	}

	/* Notify any waiting subscriptions */
	list_for_each_entry_safe(s, st, &nseq->subscriptions, nameseq_list) {
		tipc_subscrp_report_overlap(s, publ->lower, publ->upper,
					    TIPC_WITHDRAWN, publ->ref,
					    publ->node, removed_subseq);
	}

	return publ;
}

/**
 * tipc_nameseq_subscribe - attach a subscription, and issue
 * the prescribed number of events if there is any sub-
 * sequence overlapping with the requested sequence
 */
static void tipc_nameseq_subscribe(struct name_seq *nseq,
				   struct tipc_subscription *s)
{
	struct sub_seq *sseq = nseq->sseqs;
	struct tipc_name_seq ns;

	tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);

	tipc_subscrp_get(s);
	list_add(&s->nameseq_list, &nseq->subscriptions);

	if (!sseq)
		return;

	while (sseq != &nseq->sseqs[nseq->first_free]) {
		if (tipc_subscrp_check_overlap(&ns, sseq->lower, sseq->upper)) {
			struct publication *crs;
			struct name_info *info = sseq->info;
			int must_report = 1;

			list_for_each_entry(crs, &info->zone_list, zone_list) {
				tipc_subscrp_report_overlap(s, sseq->lower,
							    sseq->upper,
							    TIPC_PUBLISHED,
							    crs->ref, crs->node,
							    must_report);
				must_report = 0;
			}
		}
		sseq++;
	}
}

static struct name_seq *nametbl_find_seq(struct net *net, u32 type)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct hlist_head *seq_head;
	struct name_seq *ns;

	seq_head = &tn->nametbl->seq_hlist[hash(type)];
	hlist_for_each_entry_rcu(ns, seq_head, ns_list) {
		if (ns->type == type)
			return ns;
	}

	return NULL;
}

struct publication *tipc_nametbl_insert_publ(struct net *net, u32 type,
					     u32 lower, u32 upper, u32 scope,
					     u32 node, u32 port, u32 key)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct publication *publ;
	struct name_seq *seq = nametbl_find_seq(net, type);
	int index = hash(type);

	if ((scope < TIPC_ZONE_SCOPE) || (scope > TIPC_NODE_SCOPE) ||
	    (lower > upper)) {
		pr_debug("Failed to publish illegal {%u,%u,%u} with scope %u\n",
			 type, lower, upper, scope);
		return NULL;
	}

	if (!seq)
		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
	if (!seq)
		return NULL;

	spin_lock_bh(&seq->lock);
	publ = tipc_nameseq_insert_publ(net, seq, type, lower, upper,
					scope, node, port, key);
	spin_unlock_bh(&seq->lock);
	return publ;
}

struct publication *tipc_nametbl_remove_publ(struct net *net, u32 type,
					     u32 lower, u32 node, u32 ref,
					     u32 key)
{
	struct publication *publ;
	struct name_seq *seq = nametbl_find_seq(net, type);

	if (!seq)
		return NULL;

	spin_lock_bh(&seq->lock);
	publ = tipc_nameseq_remove_publ(net, seq, lower, node, ref, key);
	if (!seq->first_free && list_empty(&seq->subscriptions)) {
		hlist_del_init_rcu(&seq->ns_list);
		kfree(seq->sseqs);
		spin_unlock_bh(&seq->lock);
		kfree_rcu(seq, rcu);
		return publ;
	}
	spin_unlock_bh(&seq->lock);
	return publ;
}

/**
 * tipc_nametbl_translate - perform name translation
 *
 * On entry, 'destnode' is the search domain used during translation.
 *
 * On exit:
 * - if name translation is deferred to another node/cluster/zone,
 *   leaves 'destnode' unchanged (will be non-zero) and returns 0
 * - if name translation is attempted and succeeds, sets 'destnode'
 *   to publishing node and returns port reference (will be non-zero)
 * - if name translation is attempted and fails, sets 'destnode' to 0
 *   and returns 0
 */
u32 tipc_nametbl_translate(struct net *net, u32 type, u32 instance,
			   u32 *destnode)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct sub_seq *sseq;
	struct name_info *info;
	struct publication *publ;
	struct name_seq *seq;
	u32 ref = 0;
	u32 node = 0;

	if (!tipc_in_scope(*destnode, tn->own_addr))
		return 0;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (unlikely(!seq))
		goto not_found;
	spin_lock_bh(&seq->lock);
	sseq = nameseq_find_subseq(seq, instance);
	if (unlikely(!sseq))
		goto no_match;
	info = sseq->info;

	/* Closest-First Algorithm */
	if (likely(!*destnode)) {
		if (!list_empty(&info->node_list)) {
			publ = list_first_entry(&info->node_list,
						struct publication,
						node_list);
			list_move_tail(&publ->node_list,
				       &info->node_list);
		} else if (!list_empty(&info->cluster_list)) {
			publ = list_first_entry(&info->cluster_list,
						struct publication,
						cluster_list);
			list_move_tail(&publ->cluster_list,
				       &info->cluster_list);
		} else {
			publ = list_first_entry(&info->zone_list,
						struct publication,
						zone_list);
			list_move_tail(&publ->zone_list,
				       &info->zone_list);
		}
	}

	/* Round-Robin Algorithm */
	else if (*destnode == tn->own_addr) {
		if (list_empty(&info->node_list))
			goto no_match;
		publ = list_first_entry(&info->node_list, struct publication,
					node_list);
		list_move_tail(&publ->node_list, &info->node_list);
	} else if (in_own_cluster_exact(net, *destnode)) {
		if (list_empty(&info->cluster_list))
			goto no_match;
		publ = list_first_entry(&info->cluster_list, struct publication,
					cluster_list);
		list_move_tail(&publ->cluster_list, &info->cluster_list);
	} else {
		publ = list_first_entry(&info->zone_list, struct publication,
					zone_list);
		list_move_tail(&publ->zone_list, &info->zone_list);
	}

	ref = publ->ref;
	node = publ->node;
no_match:
	spin_unlock_bh(&seq->lock);
not_found:
	rcu_read_unlock();
	*destnode = node;
	return ref;
}

/**
 * tipc_nametbl_mc_translate - find multicast destinations
 *
 * Creates list of all local ports that overlap the given multicast address;
 * also determines if any off-node ports overlap.
 *
 * Note: Publications with a scope narrower than 'limit' are ignored.
 * (i.e. local node-scope publications mustn't receive messages arriving
 * from another node, even if the multicast link brought it here)
 *
 * Returns non-zero if any off-node ports overlap
 */
int tipc_nametbl_mc_translate(struct net *net, u32 type, u32 lower, u32 upper,
			      u32 limit, struct list_head *dports)
{
	struct name_seq *seq;
	struct sub_seq *sseq;
	struct sub_seq *sseq_stop;
	struct name_info *info;
	int res = 0;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (!seq)
		goto exit;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
	sseq_stop = seq->sseqs + seq->first_free;
	for (; sseq != sseq_stop; sseq++) {
		struct publication *publ;

		if (sseq->lower > upper)
			break;

		info = sseq->info;
		list_for_each_entry(publ, &info->node_list, node_list) {
			if (publ->scope <= limit)
				u32_push(dports, publ->ref);
		}

		if (info->cluster_list_size != info->node_list_size)
			res = 1;
	}
	spin_unlock_bh(&seq->lock);
exit:
	rcu_read_unlock();
	return res;
}

/* tipc_nametbl_lookup_dst_nodes - find broadcast destination nodes
 * - Creates list of nodes that overlap the given multicast address
 * - Determines if any node local ports overlap
 */
void tipc_nametbl_lookup_dst_nodes(struct net *net, u32 type, u32 lower,
				   u32 upper, u32 domain,
				   struct tipc_nlist *nodes)
{
	struct sub_seq *sseq, *stop;
	struct publication *publ;
	struct name_info *info;
	struct name_seq *seq;

	rcu_read_lock();
	seq = nametbl_find_seq(net, type);
	if (!seq)
		goto exit;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs + nameseq_locate_subseq(seq, lower);
	stop = seq->sseqs + seq->first_free;
	for (; sseq->lower <= upper && sseq != stop; sseq++) {
		info = sseq->info;
		list_for_each_entry(publ, &info->zone_list, zone_list) {
			if (tipc_in_scope(domain, publ->node))
				tipc_nlist_add(nodes, publ->node);
		}
	}
	spin_unlock_bh(&seq->lock);
exit:
	rcu_read_unlock();
}

/**
 * tipc_nametbl_publish - add name publication to network name tables
 */
struct publication *tipc_nametbl_publish(struct net *net, u32 type, u32 lower,
					 u32 upper, u32 scope, u32 port_ref,
					 u32 key)
{
	struct publication *publ;
	struct sk_buff *buf = NULL;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->nametbl_lock);
	if (tn->nametbl->local_publ_count >= TIPC_MAX_PUBLICATIONS) {
		pr_warn("Publication failed, local publication limit reached (%u)\n",
			TIPC_MAX_PUBLICATIONS);
		spin_unlock_bh(&tn->nametbl_lock);
		return NULL;
	}

	publ = tipc_nametbl_insert_publ(net, type, lower, upper, scope,
					tn->own_addr, port_ref, key);
	if (likely(publ)) {
		tn->nametbl->local_publ_count++;
		buf = tipc_named_publish(net, publ);
		/* Any pending external events? */
		tipc_named_process_backlog(net);
	}
	spin_unlock_bh(&tn->nametbl_lock);

	if (buf)
		tipc_node_broadcast(net, buf);
	return publ;
}

/**
 * tipc_nametbl_withdraw - withdraw name publication from network name tables
 */
int tipc_nametbl_withdraw(struct net *net, u32 type, u32 lower, u32 ref,
			  u32 key)
{
	struct publication *publ;
	struct sk_buff *skb = NULL;
	struct tipc_net *tn = net_generic(net, tipc_net_id);

	spin_lock_bh(&tn->nametbl_lock);
	publ = tipc_nametbl_remove_publ(net, type, lower, tn->own_addr,
					ref, key);
	if (likely(publ)) {
		tn->nametbl->local_publ_count--;
		skb = tipc_named_withdraw(net, publ);
		/* Any pending external events? */
		tipc_named_process_backlog(net);
		list_del_init(&publ->pport_list);
		kfree_rcu(publ, rcu);
	} else {
		pr_err("Unable to remove local publication\n"
		       "(type=%u, lower=%u, ref=%u, key=%u)\n",
		       type, lower, ref, key);
	}
	spin_unlock_bh(&tn->nametbl_lock);

	if (skb) {
		tipc_node_broadcast(net, skb);
		return 1;
	}
	return 0;
}

/**
 * tipc_nametbl_subscribe - add a subscription object to the name table
 */
void tipc_nametbl_subscribe(struct tipc_subscription *s)
{
	struct tipc_net *tn = net_generic(s->net, tipc_net_id);
	u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);
	int index = hash(type);
	struct name_seq *seq;
	struct tipc_name_seq ns;

	spin_lock_bh(&tn->nametbl_lock);
	seq = nametbl_find_seq(s->net, type);
	if (!seq)
		seq = tipc_nameseq_create(type, &tn->nametbl->seq_hlist[index]);
	if (seq) {
		spin_lock_bh(&seq->lock);
		tipc_nameseq_subscribe(seq, s);
		spin_unlock_bh(&seq->lock);
	} else {
		tipc_subscrp_convert_seq(&s->evt.s.seq, s->swap, &ns);
		pr_warn("Failed to create subscription for {%u,%u,%u}\n",
			ns.type, ns.lower, ns.upper);
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

/**
 * tipc_nametbl_unsubscribe - remove a subscription object from name table
 */
void tipc_nametbl_unsubscribe(struct tipc_subscription *s)
{
	struct tipc_net *tn = net_generic(s->net, tipc_net_id);
	struct name_seq *seq;
	u32 type = tipc_subscrp_convert_seq_type(s->evt.s.seq.type, s->swap);

	spin_lock_bh(&tn->nametbl_lock);
	seq = nametbl_find_seq(s->net, type);
	if (seq != NULL) {
		spin_lock_bh(&seq->lock);
		list_del_init(&s->nameseq_list);
		tipc_subscrp_put(s);
		if (!seq->first_free && list_empty(&seq->subscriptions)) {
			hlist_del_init_rcu(&seq->ns_list);
			kfree(seq->sseqs);
			spin_unlock_bh(&seq->lock);
			kfree_rcu(seq, rcu);
		} else {
			spin_unlock_bh(&seq->lock);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);
}

int tipc_nametbl_init(struct net *net)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct name_table *tipc_nametbl;
	int i;

	tipc_nametbl = kzalloc(sizeof(*tipc_nametbl), GFP_ATOMIC);
	if (!tipc_nametbl)
		return -ENOMEM;

	for (i = 0; i < TIPC_NAMETBL_SIZE; i++)
		INIT_HLIST_HEAD(&tipc_nametbl->seq_hlist[i]);

	INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_ZONE_SCOPE]);
	INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_CLUSTER_SCOPE]);
	INIT_LIST_HEAD(&tipc_nametbl->publ_list[TIPC_NODE_SCOPE]);
	tn->nametbl = tipc_nametbl;
	spin_lock_init(&tn->nametbl_lock);
	return 0;
}

/**
 * tipc_purge_publications - remove all publications for a given type
 *
 * tipc_nametbl_lock must be held when calling this function
 */
static void tipc_purge_publications(struct net *net, struct name_seq *seq)
{
	struct publication *publ, *safe;
	struct sub_seq *sseq;
	struct name_info *info;

	spin_lock_bh(&seq->lock);
	sseq = seq->sseqs;
	info = sseq->info;
	list_for_each_entry_safe(publ, safe, &info->zone_list, zone_list) {
		tipc_nameseq_remove_publ(net, seq, publ->lower, publ->node,
					 publ->ref, publ->key);
		kfree_rcu(publ, rcu);
	}
	hlist_del_init_rcu(&seq->ns_list);
	kfree(seq->sseqs);
	spin_unlock_bh(&seq->lock);

	kfree_rcu(seq, rcu);
}

void tipc_nametbl_stop(struct net *net)
{
	int i;
	struct name_seq *seq;
	struct hlist_head *seq_head;
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct name_table *tipc_nametbl = tn->nametbl;

	/* Verify name table is empty and purge any lingering
	 * publications, then release the name table
	 */
	spin_lock_bh(&tn->nametbl_lock);
	for (i = 0; i < TIPC_NAMETBL_SIZE; i++) {
		if (hlist_empty(&tipc_nametbl->seq_hlist[i]))
			continue;
		seq_head = &tipc_nametbl->seq_hlist[i];
		hlist_for_each_entry_rcu(seq, seq_head, ns_list) {
			tipc_purge_publications(net, seq);
		}
	}
	spin_unlock_bh(&tn->nametbl_lock);

	synchronize_net();
	kfree(tipc_nametbl);
}

static int __tipc_nl_add_nametable_publ(struct tipc_nl_msg *msg,
					struct name_seq *seq,
					struct sub_seq *sseq, u32 *last_publ)
{
	void *hdr;
	struct nlattr *attrs;
	struct nlattr *publ;
	struct publication *p;

	if (*last_publ) {
		list_for_each_entry(p, &sseq->info->zone_list, zone_list)
			if (p->key == *last_publ)
				break;
		if (p->key != *last_publ)
			return -EPIPE;
	} else {
		p = list_first_entry(&sseq->info->zone_list, struct publication,
				     zone_list);
	}

	list_for_each_entry_from(p, &sseq->info->zone_list, zone_list) {
		*last_publ = p->key;

		hdr = genlmsg_put(msg->skb, msg->portid, msg->seq,
				  &tipc_genl_family, NLM_F_MULTI,
				  TIPC_NL_NAME_TABLE_GET);
		if (!hdr)
			return -EMSGSIZE;

		attrs = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE);
		if (!attrs)
			goto msg_full;

		publ = nla_nest_start(msg->skb, TIPC_NLA_NAME_TABLE_PUBL);
		if (!publ)
			goto attr_msg_full;

		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_TYPE, seq->type))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_LOWER, sseq->lower))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_UPPER, sseq->upper))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_SCOPE, p->scope))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_NODE, p->node))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_REF, p->ref))
			goto publ_msg_full;
		if (nla_put_u32(msg->skb, TIPC_NLA_PUBL_KEY, p->key))
			goto publ_msg_full;

		nla_nest_end(msg->skb, publ);
		nla_nest_end(msg->skb, attrs);
		genlmsg_end(msg->skb, hdr);
	}
	*last_publ = 0;

	return 0;

publ_msg_full:
	nla_nest_cancel(msg->skb, publ);
attr_msg_full:
	nla_nest_cancel(msg->skb, attrs);
msg_full:
	genlmsg_cancel(msg->skb, hdr);

	return -EMSGSIZE;
}

static int __tipc_nl_subseq_list(struct tipc_nl_msg *msg, struct name_seq *seq,
				 u32 *last_lower, u32 *last_publ)
{
	struct sub_seq *sseq;
	struct sub_seq *sseq_start;
	int err;

	if (*last_lower) {
		sseq_start = nameseq_find_subseq(seq, *last_lower);
		if (!sseq_start)
			return -EPIPE;
	} else {
		sseq_start = seq->sseqs;
	}

	for (sseq = sseq_start; sseq != &seq->sseqs[seq->first_free]; sseq++) {
		err = __tipc_nl_add_nametable_publ(msg, seq, sseq, last_publ);
		if (err) {
			*last_lower = sseq->lower;
			return err;
		}
	}
	*last_lower = 0;

	return 0;
}

static int tipc_nl_seq_list(struct net *net, struct tipc_nl_msg *msg,
			    u32 *last_type, u32 *last_lower, u32 *last_publ)
{
	struct tipc_net *tn = net_generic(net, tipc_net_id);
	struct hlist_head *seq_head;
	struct name_seq *seq = NULL;
	int err;
	int i;

	if (*last_type)
		i = hash(*last_type);
	else
		i = 0;

	for (; i < TIPC_NAMETBL_SIZE; i++) {
		seq_head = &tn->nametbl->seq_hlist[i];

		if (*last_type) {
			seq = nametbl_find_seq(net, *last_type);
			if (!seq)
				return -EPIPE;
		} else {
			hlist_for_each_entry_rcu(seq, seq_head, ns_list)
				break;
			if (!seq)
				continue;
		}

		hlist_for_each_entry_from_rcu(seq, ns_list) {
			spin_lock_bh(&seq->lock);
			err = __tipc_nl_subseq_list(msg, seq, last_lower,
						    last_publ);

			if (err) {
				*last_type = seq->type;
				spin_unlock_bh(&seq->lock);
				return err;
			}
			spin_unlock_bh(&seq->lock);
		}
		*last_type = 0;
	}
	return 0;
}

int tipc_nl_name_table_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	int err;
	int done = cb->args[3];
	u32 last_type = cb->args[0];
	u32 last_lower = cb->args[1];
	u32 last_publ = cb->args[2];
	struct net *net = sock_net(skb->sk);
	struct tipc_nl_msg msg;

	if (done)
		return 0;

	msg.skb = skb;
	msg.portid = NETLINK_CB(cb->skb).portid;
	msg.seq = cb->nlh->nlmsg_seq;

	rcu_read_lock();
	err = tipc_nl_seq_list(net, &msg, &last_type, &last_lower, &last_publ);
	if (!err) {
		done = 1;
	} else if (err != -EMSGSIZE) {
		/* We never set seq or call nl_dump_check_consistent(), which
		 * means that setting prev_seq here will cause the consistency
		 * check to fail in the netlink callback handler, resulting in
		 * the NLMSG_DONE message having the NLM_F_DUMP_INTR flag set
		 * if we got an error.
		 */
		cb->prev_seq = 1;
	}
	rcu_read_unlock();

	cb->args[0] = last_type;
	cb->args[1] = last_lower;
	cb->args[2] = last_publ;
	cb->args[3] = done;

	return skb->len;
}

bool u32_find(struct list_head *l, u32 value)
{
	struct u32_item *item;

	list_for_each_entry(item, l, list) {
		if (item->value == value)
			return true;
	}
	return false;
}

bool u32_push(struct list_head *l, u32 value)
{
	struct u32_item *item;

	list_for_each_entry(item, l, list) {
		if (item->value == value)
			return false;
	}
	item = kmalloc(sizeof(*item), GFP_ATOMIC);
	if (unlikely(!item))
		return false;

	item->value = value;
	list_add(&item->list, l);
	return true;
}

u32 u32_pop(struct list_head *l)
{
	struct u32_item *item;
	u32 value = 0;

	if (list_empty(l))
		return 0;
	item = list_first_entry(l, typeof(*item), list);
	value = item->value;
	list_del(&item->list);
	kfree(item);
	return value;
}

bool u32_del(struct list_head *l, u32 value)
{
	struct u32_item *item, *tmp;

	list_for_each_entry_safe(item, tmp, l, list) {
		if (item->value != value)
			continue;
		list_del(&item->list);
		kfree(item);
		return true;
	}
	return false;
}

*l
)
1118 struct u32_item
*item
, *tmp
;
1120 list_for_each_entry_safe(item
, tmp
, l
, list
) {
1121 list_del(&item
->list
);
int u32_list_len(struct list_head *l)
{
	struct u32_item *item;
	int i = 0;

	list_for_each_entry(item, l, list) {
		i++;
	}
	return i;
}
