/*
 * Copyright (c) 2014 Chelsio, Inc. All rights reserved.
 * Copyright (c) 2014 Intel Corporation. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "iwpm_util.h"

#define IWPM_HASH_BUCKET_SIZE	512
#define IWPM_HASH_BUCKET_MASK	(IWPM_HASH_BUCKET_SIZE - 1)

static LIST_HEAD(iwpm_nlmsg_req_list);
static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);

static struct hlist_head *iwpm_hash_bucket;
static DEFINE_SPINLOCK(iwpm_mapinfo_lock);

static DEFINE_MUTEX(iwpm_admin_lock);
static struct iwpm_admin_data iwpm_admin;
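
/*
 * iwpm_init - Allocate the port mapper resources for the nl client
 *
 * The mapinfo hash table is allocated on first use; subsequent callers
 * only bump the admin refcount.  Marks the client as valid on success.
 */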
int iwpm_init(u8 nl_client)
{
	if (iwpm_valid_client(nl_client))
		return -EINVAL;
	mutex_lock(&iwpm_admin_lock);
	if (atomic_read(&iwpm_admin.refcount) == 0) {
		iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *
					sizeof(struct hlist_head), GFP_KERNEL);
		if (!iwpm_hash_bucket) {
			mutex_unlock(&iwpm_admin_lock);
			pr_err("%s Unable to create mapinfo hash table\n", __func__);
			return -ENOMEM;
		}
	}
	atomic_inc(&iwpm_admin.refcount);
	mutex_unlock(&iwpm_admin_lock);
	iwpm_set_valid(nl_client, 1);
	return 0;
}
EXPORT_SYMBOL(iwpm_init);

static void free_hash_bucket(void);
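
/*
 * iwpm_exit - Release the port mapper resources for the nl client
 *
 * Drops the admin refcount and frees the mapinfo hash table once the
 * last client is gone.  Marks the client as invalid.
 */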
int iwpm_exit(u8 nl_client)
{
	if (!iwpm_valid_client(nl_client))
		return -EINVAL;
	mutex_lock(&iwpm_admin_lock);
	if (atomic_read(&iwpm_admin.refcount) == 0) {
		mutex_unlock(&iwpm_admin_lock);
		pr_err("%s Incorrect usage - negative refcount\n", __func__);
		return -EINVAL;
	}
	if (atomic_dec_and_test(&iwpm_admin.refcount)) {
		free_hash_bucket();
		pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);
	}
	mutex_unlock(&iwpm_admin_lock);
	iwpm_set_valid(nl_client, 0);
	return 0;
}
EXPORT_SYMBOL(iwpm_exit);

static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,
					       struct sockaddr_storage *);
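
/*
 * iwpm_create_mapinfo - Store a client's local and mapped sockaddr pair
 *                       in the mapinfo hash table
 */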
int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
			struct sockaddr_storage *mapped_sockaddr,
			u8 nl_client)
{
	struct hlist_head *hash_bucket_head;
	struct iwpm_mapping_info *map_info;
	unsigned long flags;

	if (!iwpm_valid_client(nl_client))
		return -EINVAL;
	map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
	if (!map_info) {
		pr_err("%s: Unable to allocate a mapping info\n", __func__);
		return -ENOMEM;
	}
	memcpy(&map_info->local_sockaddr, local_sockaddr,
	       sizeof(struct sockaddr_storage));
	memcpy(&map_info->mapped_sockaddr, mapped_sockaddr,
	       sizeof(struct sockaddr_storage));
	map_info->nl_client = nl_client;

	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
	if (iwpm_hash_bucket) {
		hash_bucket_head = get_hash_bucket_head(
					&map_info->local_sockaddr,
					&map_info->mapped_sockaddr);
		hlist_add_head(&map_info->hlist_node, hash_bucket_head);
	}
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
	return 0;
}
EXPORT_SYMBOL(iwpm_create_mapinfo);
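
/*
 * iwpm_remove_mapinfo - Remove a stored sockaddr mapping from the
 *                       mapinfo hash table
 */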
int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
			struct sockaddr_storage *mapped_local_addr)
{
	struct hlist_node *tmp_hlist_node;
	struct hlist_head *hash_bucket_head;
	struct iwpm_mapping_info *map_info = NULL;
	unsigned long flags;
	int ret = -EINVAL;

	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
	if (iwpm_hash_bucket) {
		hash_bucket_head = get_hash_bucket_head(
					local_sockaddr, mapped_local_addr);
		hlist_for_each_entry_safe(map_info, tmp_hlist_node,
					hash_bucket_head, hlist_node) {
			if (!iwpm_compare_sockaddr(&map_info->mapped_sockaddr,
						mapped_local_addr)) {
				hlist_del_init(&map_info->hlist_node);
				kfree(map_info);
				ret = 0;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
	return ret;
}
EXPORT_SYMBOL(iwpm_remove_mapinfo);

static void free_hash_bucket(void)
{
	struct hlist_node *tmp_hlist_node;
	struct iwpm_mapping_info *map_info;
	unsigned long flags;
	int i;

	/* remove all the mapinfo data from the list */
	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
	for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
		hlist_for_each_entry_safe(map_info, tmp_hlist_node,
					&iwpm_hash_bucket[i], hlist_node) {
			hlist_del_init(&map_info->hlist_node);
			kfree(map_info);
		}
	}
	/* free the hash list */
	kfree(iwpm_hash_bucket);
	iwpm_hash_bucket = NULL;
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
}
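
/*
 * iwpm_get_nlmsg_request - Allocate a netlink request tracking struct
 *
 * The request is queued on iwpm_nlmsg_req_list and returned with its
 * kref initialized plus one extra reference taken.
 */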
struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
					u8 nl_client, gfp_t gfp)
{
	struct iwpm_nlmsg_request *nlmsg_request = NULL;
	unsigned long flags;

	nlmsg_request = kzalloc(sizeof(struct iwpm_nlmsg_request), gfp);
	if (!nlmsg_request) {
		pr_err("%s Unable to allocate a nlmsg_request\n", __func__);
		return NULL;
	}
	spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
	list_add_tail(&nlmsg_request->inprocess_list, &iwpm_nlmsg_req_list);
	spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);

	kref_init(&nlmsg_request->kref);
	kref_get(&nlmsg_request->kref);
	nlmsg_request->nlmsg_seq = nlmsg_seq;
	nlmsg_request->nl_client = nl_client;
	nlmsg_request->request_done = 0;
	nlmsg_request->err_code = 0;
	return nlmsg_request;
}
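
/*
 * iwpm_free_nlmsg_request - kref release callback for a netlink request
 *
 * Removes the request from iwpm_nlmsg_req_list and frees it, noting in
 * the debug log if the request never completed.
 */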
void iwpm_free_nlmsg_request(struct kref *kref)
{
	struct iwpm_nlmsg_request *nlmsg_request;
	unsigned long flags;

	nlmsg_request = container_of(kref, struct iwpm_nlmsg_request, kref);

	spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
	list_del_init(&nlmsg_request->inprocess_list);
	spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);

	if (!nlmsg_request->request_done)
		pr_debug("%s Freeing incomplete nlmsg request (seq = %u).\n",
			__func__, nlmsg_request->nlmsg_seq);
	kfree(nlmsg_request);
}
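
/*
 * iwpm_find_nlmsg_request - Look up a pending request by sequence number
 *
 * Returns the matching request with an extra kref reference, or NULL.
 */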
struct iwpm_nlmsg_request *iwpm_find_nlmsg_request(__u32 echo_seq)
{
	struct iwpm_nlmsg_request *nlmsg_request;
	struct iwpm_nlmsg_request *found_request = NULL;
	unsigned long flags;

	spin_lock_irqsave(&iwpm_nlmsg_req_lock, flags);
	list_for_each_entry(nlmsg_request, &iwpm_nlmsg_req_list,
			    inprocess_list) {
		if (nlmsg_request->nlmsg_seq == echo_seq) {
			found_request = nlmsg_request;
			kref_get(&nlmsg_request->kref);
			break;
		}
	}
	spin_unlock_irqrestore(&iwpm_nlmsg_req_lock, flags);
	return found_request;
}
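
/*
 * iwpm_wait_complete_req - Block until the userspace port mapper answers
 *
 * Waits up to IWPM_NL_TIMEOUT for request_done to be set; on timeout a
 * negative error is returned, otherwise the error code recorded in the
 * request.  The caller's kref reference is dropped either way.
 */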
int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request)
{
	int ret;

	init_waitqueue_head(&nlmsg_request->waitq);

	ret = wait_event_timeout(nlmsg_request->waitq,
			(nlmsg_request->request_done != 0), IWPM_NL_TIMEOUT);
	if (!ret) {
		ret = -EINVAL;
		pr_info("%s: Timeout %d sec for netlink request (seq = %u)\n",
			__func__, (IWPM_NL_TIMEOUT/HZ), nlmsg_request->nlmsg_seq);
	} else {
		ret = nlmsg_request->err_code;
	}
	kref_put(&nlmsg_request->kref, iwpm_free_nlmsg_request);
	return ret;
}

int iwpm_get_nlmsg_seq(void)
{
	return atomic_inc_return(&iwpm_admin.nlmsg_seq);
}

int iwpm_valid_client(u8 nl_client)
{
	if (nl_client >= RDMA_NL_NUM_CLIENTS)
		return 0;
	return iwpm_admin.client_list[nl_client];
}

void iwpm_set_valid(u8 nl_client, int valid)
{
	if (nl_client >= RDMA_NL_NUM_CLIENTS)
		return;
	iwpm_admin.client_list[nl_client] = valid;
}

int iwpm_registered_client(u8 nl_client)
{
	return iwpm_admin.reg_list[nl_client];
}

void iwpm_set_registered(u8 nl_client, int reg)
{
	iwpm_admin.reg_list[nl_client] = reg;
}
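
/*
 * iwpm_compare_sockaddr - Compare two sockaddr storage structs
 *
 * Returns 0 if the address family, IP address and port match,
 * otherwise a non-zero value.
 */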
int iwpm_compare_sockaddr(struct sockaddr_storage *a_sockaddr,
			struct sockaddr_storage *b_sockaddr)
{
	if (a_sockaddr->ss_family != b_sockaddr->ss_family)
		return 1;
	if (a_sockaddr->ss_family == AF_INET) {
		struct sockaddr_in *a4_sockaddr =
			(struct sockaddr_in *)a_sockaddr;
		struct sockaddr_in *b4_sockaddr =
			(struct sockaddr_in *)b_sockaddr;
		if (!memcmp(&a4_sockaddr->sin_addr,
			&b4_sockaddr->sin_addr, sizeof(struct in_addr))
			&& a4_sockaddr->sin_port == b4_sockaddr->sin_port)
			return 0;

	} else if (a_sockaddr->ss_family == AF_INET6) {
		struct sockaddr_in6 *a6_sockaddr =
			(struct sockaddr_in6 *)a_sockaddr;
		struct sockaddr_in6 *b6_sockaddr =
			(struct sockaddr_in6 *)b_sockaddr;
		if (!memcmp(&a6_sockaddr->sin6_addr,
			&b6_sockaddr->sin6_addr, sizeof(struct in6_addr))
			&& a6_sockaddr->sin6_port == b6_sockaddr->sin6_port)
			return 0;

	} else {
		pr_err("%s: Invalid sockaddr family\n", __func__);
	}
	return 1;
}
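
/*
 * iwpm_create_nlmsg - Allocate an skb and put the netlink header for
 *                     the given port mapper operation
 *
 * Returns the skb with *nlh pointing at the new header, or NULL on
 * failure.
 */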
struct sk_buff *iwpm_create_nlmsg(u32 nl_op, struct nlmsghdr **nlh,
				  int nl_client)
{
	struct sk_buff *skb = NULL;

	skb = dev_alloc_skb(NLMSG_GOODSIZE);
	if (!skb) {
		pr_err("%s Unable to allocate skb\n", __func__);
		goto create_nlmsg_exit;
	}
	if (!(ibnl_put_msg(skb, nlh, 0, 0, nl_client, nl_op,
			   NLM_F_REQUEST))) {
		pr_warn("%s: Unable to put the nlmsg header\n", __func__);
		dev_kfree_skb(skb);
		skb = NULL;
	}
create_nlmsg_exit:
	return skb;
}
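
/*
 * iwpm_parse_nlmsg - Validate and parse the attributes of a received
 *                    netlink message against the given policy
 */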
int iwpm_parse_nlmsg(struct netlink_callback *cb, int policy_max,
		     const struct nla_policy *nlmsg_policy,
		     struct nlattr *nltb[], const char *msg_type)
{
	int nlh_len = 0;
	int ret;
	const char *err_str = "";

	ret = nlmsg_validate(cb->nlh, nlh_len, policy_max-1, nlmsg_policy);
	if (ret) {
		err_str = "Invalid attribute";
		goto parse_nlmsg_error;
	}
	ret = nlmsg_parse(cb->nlh, nlh_len, nltb, policy_max-1, nlmsg_policy);
	if (ret) {
		err_str = "Unable to parse the nlmsg";
		goto parse_nlmsg_error;
	}
	ret = iwpm_validate_nlmsg_attr(nltb, policy_max);
	if (ret) {
		err_str = "Invalid NULL attribute";
		goto parse_nlmsg_error;
	}
	return 0;
parse_nlmsg_error:
	pr_warn("%s: %s (msg type %s ret = %d)\n",
			__func__, err_str, msg_type, ret);
	return ret;
}

void iwpm_print_sockaddr(struct sockaddr_storage *sockaddr, char *msg)
{
	struct sockaddr_in6 *sockaddr_v6;
	struct sockaddr_in *sockaddr_v4;

	switch (sockaddr->ss_family) {
	case AF_INET:
		sockaddr_v4 = (struct sockaddr_in *)sockaddr;
		pr_debug("%s IPV4 %pI4: %u(0x%04X)\n",
			msg, &sockaddr_v4->sin_addr,
			ntohs(sockaddr_v4->sin_port),
			ntohs(sockaddr_v4->sin_port));
		break;
	case AF_INET6:
		sockaddr_v6 = (struct sockaddr_in6 *)sockaddr;
		pr_debug("%s IPV6 %pI6: %u(0x%04X)\n",
			msg, &sockaddr_v6->sin6_addr,
			ntohs(sockaddr_v6->sin6_port),
			ntohs(sockaddr_v6->sin6_port));
		break;
	default:
		break;
	}
}

static u32 iwpm_ipv6_jhash(struct sockaddr_in6 *ipv6_sockaddr)
{
	u32 ipv6_hash = jhash(&ipv6_sockaddr->sin6_addr, sizeof(struct in6_addr), 0);
	u32 hash = jhash_2words(ipv6_hash, (__force u32) ipv6_sockaddr->sin6_port, 0);
	return hash;
}

static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
{
	u32 ipv4_hash = jhash(&ipv4_sockaddr->sin_addr, sizeof(struct in_addr), 0);
	u32 hash = jhash_2words(ipv4_hash, (__force u32) ipv4_sockaddr->sin_port, 0);
	return hash;
}
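
/*
 * get_hash_bucket_head - Return the hash bucket for a (local, mapped)
 *                        sockaddr pair
 *
 * Both addresses are hashed with jhash and combined; the result is
 * masked into the IWPM_HASH_BUCKET_SIZE bucket array.
 */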
static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage
					       *local_sockaddr,
					       struct sockaddr_storage
					       *mapped_sockaddr)
{
	u32 local_hash, mapped_hash, hash;

	if (local_sockaddr->ss_family == AF_INET) {
		local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);
		mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);

	} else if (local_sockaddr->ss_family == AF_INET6) {
		local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);
		mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);

	} else {
		pr_err("%s: Invalid sockaddr family\n", __func__);
		return NULL;
	}

	if (local_hash == mapped_hash) /* if port mapper isn't available */
		hash = local_hash;
	else
		hash = jhash_2words(local_hash, mapped_hash, 0);

	return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];
}
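
/*
 * send_mapinfo_num - Send the total number of mapinfo records to the
 *                    userspace port mapper (pid iwpm_pid)
 */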
static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
{
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	u32 msg_seq = 0;
	const char *err_str = "";
	int ret = -EINVAL;

	skb = iwpm_create_nlmsg(RDMA_NL_IWPM_MAPINFO_NUM, &nlh, nl_client);
	if (!skb) {
		err_str = "Unable to create a nlmsg";
		goto mapinfo_num_error;
	}
	nlh->nlmsg_seq = iwpm_get_nlmsg_seq();
	err_str = "Unable to put attribute of mapinfo number nlmsg";
	ret = ibnl_put_attr(skb, nlh, sizeof(u32), &msg_seq, IWPM_NLA_MAPINFO_SEQ);
	if (ret)
		goto mapinfo_num_error;
	ret = ibnl_put_attr(skb, nlh, sizeof(u32),
				&mapping_num, IWPM_NLA_MAPINFO_SEND_NUM);
	if (ret)
		goto mapinfo_num_error;
	ret = ibnl_unicast(skb, nlh, iwpm_pid);
	if (ret) {
		skb = NULL;
		err_str = "Unable to send a nlmsg";
		goto mapinfo_num_error;
	}
	pr_debug("%s: Sent mapping number = %d\n", __func__, mapping_num);
	return 0;
mapinfo_num_error:
	pr_info("%s: %s\n", __func__, err_str);
	if (skb)
		dev_kfree_skb(skb);
	return ret;
}
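
/*
 * send_nlmsg_done - Append NLMSG_DONE to a multipart mapinfo message and
 *                   unicast it to the userspace port mapper
 */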
static int send_nlmsg_done(struct sk_buff *skb, u8 nl_client, int iwpm_pid)
{
	struct nlmsghdr *nlh = NULL;
	int ret = 0;

	if (!skb)
		return ret;
	if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
			   RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
		pr_warn("%s Unable to put NLMSG_DONE\n", __func__);
		return -ENOMEM;
	}
	nlh->nlmsg_type = NLMSG_DONE;
	ret = ibnl_unicast(skb, (struct nlmsghdr *)skb->data, iwpm_pid);
	if (ret)
		pr_warn("%s Unable to send a nlmsg\n", __func__);
	return ret;
}
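
/*
 * iwpm_send_mapinfo - Send all stored mapinfo records of the nl client
 *                     to the userspace port mapper
 *
 * Records are packed into NLM_F_MULTI messages; when an skb fills up it
 * is flushed with send_nlmsg_done() and a new one is allocated, up to
 * IWPM_MAPINFO_SKB_COUNT skbs.  Finishes by reporting the total mapping
 * count via send_mapinfo_num().
 */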
int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
{
	struct iwpm_mapping_info *map_info;
	struct sk_buff *skb = NULL;
	struct nlmsghdr *nlh;
	int skb_num = 0, mapping_num = 0;
	int i = 0, nlmsg_bytes = 0;
	unsigned long flags;
	const char *err_str = "";
	int ret = 0;

	skb = dev_alloc_skb(NLMSG_GOODSIZE);
	if (!skb) {
		ret = -ENOMEM;
		err_str = "Unable to allocate skb";
		goto send_mapping_info_exit;
	}
	skb_num++;
	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
	for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
		hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
				     hlist_node) {
			if (map_info->nl_client != nl_client)
				continue;
			if (!(ibnl_put_msg(skb, &nlh, 0, 0, nl_client,
					RDMA_NL_IWPM_MAPINFO, NLM_F_MULTI))) {
				ret = -ENOMEM;
				err_str = "Unable to put the nlmsg header";
				goto send_mapping_info_unlock;
			}
			err_str = "Unable to put attribute of the nlmsg";
			ret = ibnl_put_attr(skb, nlh,
					sizeof(struct sockaddr_storage),
					&map_info->local_sockaddr,
					IWPM_NLA_MAPINFO_LOCAL_ADDR);
			if (ret)
				goto send_mapping_info_unlock;
			ret = ibnl_put_attr(skb, nlh,
					sizeof(struct sockaddr_storage),
					&map_info->mapped_sockaddr,
					IWPM_NLA_MAPINFO_MAPPED_ADDR);
			if (ret)
				goto send_mapping_info_unlock;

			iwpm_print_sockaddr(&map_info->local_sockaddr,
				"send_mapping_info: Local sockaddr:");
			iwpm_print_sockaddr(&map_info->mapped_sockaddr,
				"send_mapping_info: Mapped local sockaddr:");
			mapping_num++;
			nlmsg_bytes += nlh->nlmsg_len;

			/* check if all mappings can fit in one skb */
			if (NLMSG_GOODSIZE - nlmsg_bytes < nlh->nlmsg_len * 2) {
				/* and leave room for NLMSG_DONE */
				nlmsg_bytes = 0;
				skb_num++;
				spin_unlock_irqrestore(&iwpm_mapinfo_lock,
						       flags);
				/* send the skb */
				ret = send_nlmsg_done(skb, nl_client, iwpm_pid);
				skb = NULL;
				if (ret) {
					err_str = "Unable to send map info";
					goto send_mapping_info_exit;
				}
				if (skb_num == IWPM_MAPINFO_SKB_COUNT) {
					ret = -ENOMEM;
					err_str = "Insufficient skbs for map info";
					goto send_mapping_info_exit;
				}
				skb = dev_alloc_skb(NLMSG_GOODSIZE);
				if (!skb) {
					ret = -ENOMEM;
					err_str = "Unable to allocate skb";
					goto send_mapping_info_exit;
				}
				spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
			}
		}
	}
send_mapping_info_unlock:
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
send_mapping_info_exit:
	if (ret) {
		pr_warn("%s: %s (ret = %d)\n", __func__, err_str, ret);
		if (skb)
			dev_kfree_skb(skb);
		return ret;
	}
	send_nlmsg_done(skb, nl_client, iwpm_pid);
	return send_mapinfo_num(mapping_num, nl_client, iwpm_pid);
}
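
/*
 * iwpm_mapinfo_available - Return 1 if the mapinfo hash table holds at
 *                          least one mapping, otherwise 0
 */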
int iwpm_mapinfo_available(void)
{
	unsigned long flags;
	int full_bucket = 0, i = 0;

	spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
	if (iwpm_hash_bucket) {
		for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
			if (!hlist_empty(&iwpm_hash_bucket[i])) {
				full_bucket = 1;
				break;
			}
		}
	}
	spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
	return full_bucket;
}