/******************************************************************************
*******************************************************************************
**
**  Copyright (C) Sistina Software, Inc.  1997-2003  All rights reserved.
**  Copyright (C) 2004-2007 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/
/*
 * This is the "low-level" comms layer.
 *
 * It is responsible for sending/receiving messages
 * from other nodes in the cluster.
 *
 * Cluster nodes are referred to by their nodeids. nodeids are
 * simply 32 bit numbers to the locking module - if they need to
 * be expanded for the cluster infrastructure then that is its
 * responsibility. It is this layer's
 * responsibility to resolve these into IP addresses or
 * whatever it needs for inter-node communication.
 *
 * The comms level is two workqueues that deal mainly with
 * the receiving of messages from other nodes and passing them
 * up to the mid-level comms layer (which understands the
 * message format) for execution by the locking core, and
 * a send workqueue which does all the setting up of connections
 * to remote nodes and the sending of data. Receive workers are not
 * allowed to send their own data because it may cause them to wait in
 * times of high load. Also, this way, the send side can collect together
 * messages bound for one node and send them in one block.
 *
 * I don't see any problem with the recv side executing the locking
 * code on behalf of remote processes as the locking code is
 * short, efficient and never waits.
 */
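
/*
 * Lifecycle sketch of this layer's public entry points (illustrative,
 * not a verbatim caller): dlm_lowcomms_start() allocates the connection
 * array, binds the listening socket and starts the dlm_recv and dlm_send
 * workqueues; dlm_lowcomms_get_buffer()/dlm_lowcomms_commit_buffer()
 * stage an outgoing message on a per-connection writequeue for the send
 * worker to push out; dlm_lowcomms_close() tears down one node's
 * connection and dlm_lowcomms_stop() shuts the whole layer down.
 */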
#include <asm/ioctls.h>
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>

#include "dlm_internal.h"
#include "lowcomms.h"
#include "midcomms.h"
#include "config.h"

/* Circular buffer tracking unprocessed data in rx_page; the size it is
   initialised with must be a power of two so that mask = size - 1 works. */
struct cbuf {
	unsigned int base;
	unsigned int len;
	unsigned int mask;
};

#define NODE_INCREMENT 32
static void cbuf_add(struct cbuf *cb, int n)
{
	cb->len += n;
}

static int cbuf_data(struct cbuf *cb)
{
	return ((cb->base + cb->len) & cb->mask);
}

static void cbuf_init(struct cbuf *cb, int size)
{
	cb->base = cb->len = 0;
	cb->mask = size - 1;
}

static void cbuf_eat(struct cbuf *cb, int n)
{
	cb->len -= n;
	cb->base += n;
	cb->base &= cb->mask;
}

static bool cbuf_empty(struct cbuf *cb)
{
	return cb->len == 0;
}
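
/*
 * Worked example of the helpers above, assuming PAGE_CACHE_SIZE == 4096
 * (so mask == 0xfff):
 *	cbuf_init(cb, 4096);	base = 0, len = 0
 *	cbuf_add(cb, 4000);	data occupies [0, 4000); cbuf_data() == 4000
 *	cbuf_eat(cb, 3900);	base = 3900, len = 100
 *	cbuf_add(cb, 300);	cbuf_data() == (3900 + 400) & 0xfff == 204,
 *				i.e. the write position has wrapped past the
 *				end of the page back to its start.
 */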
/* Maximum number of incoming messages to process before
   doing a cond_resched()
*/
#define MAX_RX_MSG_COUNT 25
struct connection {
	struct socket *sock;	/* NULL if not connected */
	uint32_t nodeid;	/* So we know who we are in the list */
	struct mutex sock_mutex;
	unsigned long flags;	/* bit 1,2 = We are on the read/write lists */
#define CF_READ_PENDING 1
#define CF_WRITE_PENDING 2
#define CF_CONNECT_PENDING 3
#define CF_IS_OTHERCON 4
	struct list_head writequeue;	/* List of outgoing writequeue_entries */
	struct list_head listenlist;	/* List of allocated listening sockets */
	spinlock_t writequeue_lock;
	int (*rx_action) (struct connection *);	/* What to do when active */
	struct page *rx_page;
	struct cbuf cb;
	int retries;
#define MAX_CONNECT_RETRIES 3
	struct connection *othercon;
	struct work_struct rwork;	/* Receive workqueue */
	struct work_struct swork;	/* Send workqueue */
};
#define sock2con(x) ((struct connection *)(x)->sk_user_data)
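
/*
 * Note on the CF_* bits: the data-ready/write-space callbacks and the
 * buffer-commit path all use test_and_set_bit() on these flags before
 * calling queue_work(), so a connection sits on each workqueue at most
 * once no matter how many events arrive; the worker clears the bit
 * again before processing.
 */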
/* An entry waiting to be sent */
struct writequeue_entry {
	struct list_head list;
	struct page *page;
	int offset;
	int len;
	int end;
	int users;
	struct connection *con;
};
static struct sockaddr_storage dlm_local_addr;

static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;

/* An array of pointers to connections, indexed by NODEID */
static struct connection **connections;
static DECLARE_MUTEX(connections_lock);
static struct kmem_cache *con_cache;
static int conn_array_size;

static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);
static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
	struct connection *con = NULL;

	down(&connections_lock);
	if (nodeid >= conn_array_size) {
		int new_size = nodeid + NODE_INCREMENT;
		struct connection **new_conns;

		new_conns = kzalloc(sizeof(struct connection *) *
				    new_size, allocation);
		if (!new_conns)
			goto finish;

		memcpy(new_conns, connections, sizeof(struct connection *) * conn_array_size);
		conn_array_size = new_size;
		kfree(connections);
		connections = new_conns;
	}

	con = connections[nodeid];
	if (con == NULL && allocation) {
		con = kmem_cache_zalloc(con_cache, allocation);
		if (!con)
			goto finish;

		con->nodeid = nodeid;
		mutex_init(&con->sock_mutex);
		INIT_LIST_HEAD(&con->writequeue);
		spin_lock_init(&con->writequeue_lock);
		INIT_WORK(&con->swork, process_send_sockets);
		INIT_WORK(&con->rwork, process_recv_sockets);

		connections[nodeid] = con;
	}

finish:
	up(&connections_lock);
	return con;
}
/* Data available on socket or listen socket received a connect */
static void lowcomms_data_ready(struct sock *sk, int count_unused)
{
	struct connection *con = sock2con(sk);

	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
}
static void lowcomms_write_space(struct sock *sk)
{
	struct connection *con = sock2con(sk);

	if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
		queue_work(send_workqueue, &con->swork);
}
static inline void lowcomms_connect_sock(struct connection *con)
{
	if (!test_and_set_bit(CF_CONNECT_PENDING, &con->flags))
		queue_work(send_workqueue, &con->swork);
}
static void lowcomms_state_change(struct sock *sk)
{
	if (sk->sk_state == TCP_ESTABLISHED)
		lowcomms_write_space(sk);
}
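
/*
 * The three sk callbacks above run in network bottom-half context and
 * must not block; each one only marks the connection as pending and
 * queues work, so all real I/O happens later on the dlm_recv and
 * dlm_send workqueues.
 */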
/* Make a socket active */
static int add_sock(struct socket *sock, struct connection *con)
{
	con->sock = sock;

	/* Install a data_ready callback */
	con->sock->sk->sk_data_ready = lowcomms_data_ready;
	con->sock->sk->sk_write_space = lowcomms_write_space;
	con->sock->sk->sk_state_change = lowcomms_state_change;

	return 0;
}
/* Add the port number to an IP6 or 4 sockaddr and return the address
   length */
static void make_sockaddr(struct sockaddr_storage *saddr, uint16_t port,
			  int *addr_len)
{
	saddr->ss_family = dlm_local_addr.ss_family;
	if (saddr->ss_family == AF_INET) {
		struct sockaddr_in *in4_addr = (struct sockaddr_in *)saddr;
		in4_addr->sin_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in);
	} else {
		struct sockaddr_in6 *in6_addr = (struct sockaddr_in6 *)saddr;
		in6_addr->sin6_port = cpu_to_be16(port);
		*addr_len = sizeof(struct sockaddr_in6);
	}
}
/* Close a remote connection and tidy up */
static void close_connection(struct connection *con, bool and_other)
{
	mutex_lock(&con->sock_mutex);

	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	}
	if (con->othercon && and_other) {
		/* Will only re-enter once. */
		close_connection(con->othercon, false);
	}
	if (con->rx_page) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}
	con->retries = 0;
	mutex_unlock(&con->sock_mutex);
}
/* Data received from remote end */
static int receive_from_sock(struct connection *con)
{
	int ret = 0;
	struct msghdr msg = {};
	struct kvec iov[2];
	unsigned len;
	int r;
	int call_again_soon = 0;
	int nvec;

	mutex_lock(&con->sock_mutex);

	if (con->sock == NULL) {
		ret = -EAGAIN;
		goto out_close;
	}

	if (con->rx_page == NULL) {
		/*
		 * This doesn't need to be atomic, but I think it should
		 * improve performance if it is.
		 */
		con->rx_page = alloc_page(GFP_ATOMIC);
		if (con->rx_page == NULL)
			goto out_resched;
		cbuf_init(&con->cb, PAGE_CACHE_SIZE);
	}

	/*
	 * iov[0] is the bit of the circular buffer between the current end
	 * point (cb.base + cb.len) and the end of the buffer.
	 */
	iov[0].iov_len = con->cb.base - cbuf_data(&con->cb);
	iov[0].iov_base = page_address(con->rx_page) + cbuf_data(&con->cb);
	iov[1].iov_len = 0;
	nvec = 1;

	/*
	 * iov[1] is the bit of the circular buffer between the start of the
	 * buffer and the start of the currently used section (cb.base)
	 */
	if (cbuf_data(&con->cb) >= con->cb.base) {
		iov[0].iov_len = PAGE_CACHE_SIZE - cbuf_data(&con->cb);
		iov[1].iov_len = con->cb.base;
		iov[1].iov_base = page_address(con->rx_page);
		nvec = 2;
	}
	len = iov[0].iov_len + iov[1].iov_len;

	r = ret = kernel_recvmsg(con->sock, &msg, iov, nvec, len,
				 MSG_DONTWAIT | MSG_NOSIGNAL);

	if (ret <= 0)
		goto out_close;
	if (ret == len)
		call_again_soon = 1;

	cbuf_add(&con->cb, ret);
	ret = dlm_process_incoming_buffer(con->nodeid,
					  page_address(con->rx_page),
					  con->cb.base, con->cb.len,
					  PAGE_CACHE_SIZE);
	if (ret == -EBADMSG) {
		printk(KERN_INFO "dlm: lowcomms: addr=%p, base=%u, len=%u, "
		       "iov_len=%u, iov_base[0]=%p, read=%d\n",
		       page_address(con->rx_page), con->cb.base, con->cb.len,
		       len, iov[0].iov_base, r);
	}
	if (ret < 0)
		goto out_close;
	cbuf_eat(&con->cb, ret);

	if (cbuf_empty(&con->cb) && !call_again_soon) {
		__free_page(con->rx_page);
		con->rx_page = NULL;
	}

	if (call_again_soon)
		goto out_resched;
	mutex_unlock(&con->sock_mutex);
	return 0;

out_resched:
	if (!test_and_set_bit(CF_READ_PENDING, &con->flags))
		queue_work(recv_workqueue, &con->rwork);
	mutex_unlock(&con->sock_mutex);
	return -EAGAIN;

out_close:
	mutex_unlock(&con->sock_mutex);
	if (ret != -EAGAIN && !test_bit(CF_IS_OTHERCON, &con->flags)) {
		close_connection(con, false);
		/* Reconnect when there is something to send */
	}
	/* Don't return success if we really got EOF */
	if (ret == 0)
		ret = -EAGAIN;

	return ret;
}
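
/*
 * Example of the two-iovec receive above, assuming PAGE_CACHE_SIZE == 4096:
 * with cb.base == 3000 and cb.len == 500, cbuf_data() == 3500, so
 * iov[0] covers bytes [3500, 4096) of the page and iov[1] covers
 * [0, 3000); a single kernel_recvmsg() can then fill the free space on
 * both sides of the wrap point.
 */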
/* Listening socket is busy, accept a connection */
static int accept_from_sock(struct connection *con)
{
	int result;
	struct sockaddr_storage peeraddr;
	struct socket *newsock;
	int len;
	int nodeid;
	struct connection *newcon;
	struct connection *addcon;

	memset(&peeraddr, 0, sizeof(peeraddr));
	result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &newsock);
	if (result < 0)
		return -ENOMEM;

	mutex_lock_nested(&con->sock_mutex, 0);

	result = -ENOTCONN;
	if (con->sock == NULL)
		goto accept_err;

	newsock->type = con->sock->type;
	newsock->ops = con->sock->ops;

	result = con->sock->ops->accept(con->sock, newsock, O_NONBLOCK);
	if (result < 0)
		goto accept_err;

	/* Get the connected socket's peer */
	memset(&peeraddr, 0, sizeof(peeraddr));
	if (newsock->ops->getname(newsock, (struct sockaddr *)&peeraddr,
				  &len, 2)) {
		result = -ECONNABORTED;
		goto accept_err;
	}

	/* Get the new node's NODEID */
	make_sockaddr(&peeraddr, 0, &len);
	if (dlm_addr_to_nodeid(&peeraddr, &nodeid)) {
		printk("dlm: connect from non cluster node\n");
		sock_release(newsock);
		mutex_unlock(&con->sock_mutex);
		return -1;
	}

	log_print("got connection from %d", nodeid);

	/*  Check to see if we already have a connection to this node. This
	 *  could happen if the two nodes initiate a connection at roughly
	 *  the same time and the connections cross on the wire.
	 *
	 *  In this case we store the incoming one in "othercon"
	 */
	newcon = nodeid2con(nodeid, GFP_KERNEL);
	if (!newcon) {
		result = -ENOMEM;
		goto accept_err;
	}
	mutex_lock_nested(&newcon->sock_mutex, 1);
	if (newcon->sock) {
		struct connection *othercon = newcon->othercon;

		if (!othercon) {
			othercon = kmem_cache_zalloc(con_cache, GFP_KERNEL);
			if (!othercon) {
				printk("dlm: failed to allocate incoming socket\n");
				mutex_unlock(&newcon->sock_mutex);
				result = -ENOMEM;
				goto accept_err;
			}
			othercon->nodeid = nodeid;
			othercon->rx_action = receive_from_sock;
			mutex_init(&othercon->sock_mutex);
			INIT_WORK(&othercon->swork, process_send_sockets);
			INIT_WORK(&othercon->rwork, process_recv_sockets);
			set_bit(CF_IS_OTHERCON, &othercon->flags);
			newcon->othercon = othercon;
		}
		othercon->sock = newsock;
		newsock->sk->sk_user_data = othercon;
		add_sock(newsock, othercon);
		addcon = othercon;
	}
	else {
		newsock->sk->sk_user_data = newcon;
		newcon->rx_action = receive_from_sock;
		add_sock(newsock, newcon);
		addcon = newcon;
	}

	mutex_unlock(&newcon->sock_mutex);

	/*
	 * Add it to the active queue in case we got data
	 * between processing the accept and adding the socket
	 * to the read_sockets list
	 */
	if (!test_and_set_bit(CF_READ_PENDING, &addcon->flags))
		queue_work(recv_workqueue, &addcon->rwork);
	mutex_unlock(&con->sock_mutex);

	return 0;

accept_err:
	mutex_unlock(&con->sock_mutex);
	sock_release(newsock);

	if (result != -EAGAIN)
		printk("dlm: error accepting connection from node: %d\n", result);
	return result;
}
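
/*
 * Crossed-connection example for the "othercon" handling above: if nodes
 * A and B call connect_to_sock() at the same moment, each may accept the
 * other's incoming socket while its own outgoing socket already sits in
 * newcon->sock. Rather than dropping one, the accepted socket is parked
 * in newcon->othercon (flagged CF_IS_OTHERCON) and used only for
 * receiving, so both directions keep working.
 */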
/* Connect a new socket to its peer */
static void connect_to_sock(struct connection *con)
{
	int result = -EHOSTUNREACH;
	struct sockaddr_storage saddr;
	int addr_len;
	struct socket *sock;

	if (con->nodeid == 0) {
		log_print("attempt to connect sock 0 foiled");
		return;
	}

	mutex_lock(&con->sock_mutex);
	if (con->retries++ > MAX_CONNECT_RETRIES)
		goto out;

	/* Some odd races can cause double-connects, ignore them */
	if (con->sock) {
		result = 0;
		goto out;
	}

	/* Create a socket to communicate with */
	result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM,
				  IPPROTO_TCP, &sock);
	if (result < 0)
		goto out_err;

	memset(&saddr, 0, sizeof(saddr));
	if (dlm_nodeid_to_addr(con->nodeid, &saddr))
		goto out_err;

	sock->sk->sk_user_data = con;
	con->rx_action = receive_from_sock;

	make_sockaddr(&saddr, dlm_config.ci_tcp_port, &addr_len);

	add_sock(sock, con);

	log_print("connecting to %d", con->nodeid);
	result =
		sock->ops->connect(sock, (struct sockaddr *)&saddr, addr_len,
				   O_NONBLOCK);
	if (result == -EINPROGRESS)
		result = 0;
	if (result == 0)
		goto out;

out_err:
	if (con->sock) {
		sock_release(con->sock);
		con->sock = NULL;
	}
	/*
	 * Some errors are fatal and this list might need adjusting. For other
	 * errors we try again until the max number of retries is reached.
	 */
	if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
	    result != -ENETDOWN && result != -EINVAL
	    && result != -EPROTONOSUPPORT) {
		lowcomms_connect_sock(con);
		result = 0;
	}
out:
	mutex_unlock(&con->sock_mutex);
	return;
}
static struct socket *create_listen_sock(struct connection *con,
					 struct sockaddr_storage *saddr)
{
	struct socket *sock = NULL;
	int result = 0;
	int one = 1;
	int addr_len;

	if (dlm_local_addr.ss_family == AF_INET)
		addr_len = sizeof(struct sockaddr_in);
	else
		addr_len = sizeof(struct sockaddr_in6);

	/* Create a socket to communicate with */
	result = sock_create_kern(dlm_local_addr.ss_family, SOCK_STREAM, IPPROTO_TCP, &sock);
	if (result < 0) {
		printk("dlm: Can't create listening comms socket\n");
		goto create_out;
	}

	result = sock_setsockopt(sock, SOL_SOCKET, SO_REUSEADDR,
				 (char *)&one, sizeof(one));
	if (result < 0) {
		printk("dlm: Failed to set SO_REUSEADDR on socket: result=%d\n",
		       result);
	}

	sock->sk->sk_user_data = con;
	con->rx_action = accept_from_sock;
	con->sock = sock;

	/* Bind to our port */
	make_sockaddr(saddr, dlm_config.ci_tcp_port, &addr_len);
	result = sock->ops->bind(sock, (struct sockaddr *) saddr, addr_len);
	if (result < 0) {
		printk("dlm: Can't bind to port %d\n", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		con->sock = NULL;
		goto create_out;
	}

	result = sock_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
				 (char *)&one, sizeof(one));
	if (result < 0) {
		printk("dlm: Set keepalive failed: %d\n", result);
	}

	result = sock->ops->listen(sock, 5);
	if (result < 0) {
		printk("dlm: Can't listen on port %d\n", dlm_config.ci_tcp_port);
		sock_release(sock);
		sock = NULL;
		goto create_out;
	}

create_out:
	return sock;
}
/* Listen on all interfaces */
static int listen_for_all(void)
{
	struct socket *sock = NULL;
	struct connection *con = nodeid2con(0, GFP_KERNEL);
	int result = -EINVAL;

	/* We don't support multi-homed hosts */
	set_bit(CF_IS_OTHERCON, &con->flags);

	sock = create_listen_sock(con, &dlm_local_addr);
	if (sock) {
		add_sock(sock, con);
		result = 0;
	}
	else {
		result = -EADDRINUSE;
	}

	return result;
}
static struct writequeue_entry *new_writequeue_entry(struct connection *con,
						     gfp_t allocation)
{
	struct writequeue_entry *entry;

	entry = kmalloc(sizeof(struct writequeue_entry), allocation);
	if (!entry)
		return NULL;

	entry->page = alloc_page(allocation);
	if (!entry->page) {
		kfree(entry);
		return NULL;
	}

	entry->offset = 0;
	entry->len = 0;
	entry->end = 0;
	entry->users = 0;
	entry->con = con;

	return entry;
}
void *dlm_lowcomms_get_buffer(int nodeid, int len,
			      gfp_t allocation, char **ppc)
{
	struct connection *con;
	struct writequeue_entry *e;
	int offset = 0;
	int users = 0;

	con = nodeid2con(nodeid, allocation);
	if (!con)
		return NULL;

	spin_lock(&con->writequeue_lock);
	e = list_entry(con->writequeue.prev, struct writequeue_entry, list);
	if ((&e->list == &con->writequeue) ||
	    (PAGE_CACHE_SIZE - e->end < len)) {
		e = NULL;
	} else {
		offset = e->end;
		e->end += len;
		users = e->users++;
	}
	spin_unlock(&con->writequeue_lock);

	if (e) {
	got_one:
		if (users == 0)
			kmap(e->page);
		*ppc = page_address(e->page) + offset;
		return e;
	}

	e = new_writequeue_entry(con, allocation);
	if (e) {
		spin_lock(&con->writequeue_lock);
		offset = e->end;
		e->end += len;
		users = e->users++;
		list_add_tail(&e->list, &con->writequeue);
		spin_unlock(&con->writequeue_lock);
		goto got_one;
	}
	return NULL;
}
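
/*
 * Every successful dlm_lowcomms_get_buffer() must be paired with a
 * dlm_lowcomms_commit_buffer() on the returned handle: get_buffer bumps
 * e->users (kmapping the page for the first user) and commit drops it,
 * and send_to_sock() will not free an entry while e->users is non-zero.
 * Minimal caller sketch (msg/msglen stand in for a real message):
 *
 *	char *p;
 *	void *mh = dlm_lowcomms_get_buffer(nodeid, msglen, GFP_KERNEL, &p);
 *	if (!mh)
 *		return -ENOMEM;
 *	memcpy(p, msg, msglen);
 *	dlm_lowcomms_commit_buffer(mh);
 */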
void dlm_lowcomms_commit_buffer(void *mh)
{
	struct writequeue_entry *e = (struct writequeue_entry *)mh;
	struct connection *con = e->con;
	int users;

	spin_lock(&con->writequeue_lock);
	users = --e->users;
	if (users)
		goto out;
	e->len = e->end - e->offset;
	kunmap(e->page);
	spin_unlock(&con->writequeue_lock);

	if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
		queue_work(send_workqueue, &con->swork);
	}
	return;

out:
	spin_unlock(&con->writequeue_lock);
	return;
}
static void free_entry(struct writequeue_entry *e)
{
	__free_page(e->page);
	kfree(e);
}
/* Send a message */
static void send_to_sock(struct connection *con)
{
	int ret = 0;
	ssize_t(*sendpage) (struct socket *, struct page *, int, size_t, int);
	const int msg_flags = MSG_DONTWAIT | MSG_NOSIGNAL;
	struct writequeue_entry *e;
	int len, offset;

	mutex_lock(&con->sock_mutex);
	if (con->sock == NULL)
		goto out_connect;

	sendpage = con->sock->ops->sendpage;

	spin_lock(&con->writequeue_lock);
	for (;;) {
		e = list_entry(con->writequeue.next, struct writequeue_entry,
			       list);
		if ((struct list_head *) e == &con->writequeue)
			break;

		len = e->len;
		offset = e->offset;
		BUG_ON(len == 0 && e->users == 0);
		spin_unlock(&con->writequeue_lock);

		ret = 0;
		if (len) {
			ret = sendpage(con->sock, e->page, offset, len,
				       msg_flags);
			if (ret == -EAGAIN || ret == 0)
				goto out;
			if (ret <= 0)
				goto send_error;
		}
		else {
			/* Don't starve people filling buffers */
			cond_resched();
		}

		spin_lock(&con->writequeue_lock);
		e->offset += ret;
		e->len -= ret;

		if (e->len == 0 && e->users == 0) {
			list_del(&e->list);
			free_entry(e);
			continue;
		}
	}
	spin_unlock(&con->writequeue_lock);
out:
	mutex_unlock(&con->sock_mutex);
	return;

send_error:
	mutex_unlock(&con->sock_mutex);
	close_connection(con, false);
	lowcomms_connect_sock(con);
	return;

out_connect:
	mutex_unlock(&con->sock_mutex);
	connect_to_sock(con);
	return;
}
static void clean_one_writequeue(struct connection *con)
{
	struct list_head *list;
	struct list_head *temp;

	spin_lock(&con->writequeue_lock);
	list_for_each_safe(list, temp, &con->writequeue) {
		struct writequeue_entry *e =
			list_entry(list, struct writequeue_entry, list);
		list_del(&e->list);
		free_entry(e);
	}
	spin_unlock(&con->writequeue_lock);
}
/* Called from recovery when it knows that a node has
   left the cluster */
int dlm_lowcomms_close(int nodeid)
{
	struct connection *con;

	if (!connections)
		goto out;

	log_print("closing connection to node %d", nodeid);
	con = nodeid2con(nodeid, 0);
	if (con) {
		clean_one_writequeue(con);
		close_connection(con, true);
	}
	return 0;

out:
	return -1;
}
/* Look for activity on active sockets */
static void process_recv_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, rwork);
	int err;

	clear_bit(CF_READ_PENDING, &con->flags);
	do {
		err = con->rx_action(con);
	} while (!err);
}
static void process_send_sockets(struct work_struct *work)
{
	struct connection *con = container_of(work, struct connection, swork);

	if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
		connect_to_sock(con);
	}

	clear_bit(CF_WRITE_PENDING, &con->flags);
	send_to_sock(con);
}
/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
	int nodeid;

	for (nodeid = 1; nodeid < conn_array_size; nodeid++) {
		struct connection *con = nodeid2con(nodeid, 0);

		if (con)
			clean_one_writequeue(con);
	}
}
static void work_stop(void)
{
	destroy_workqueue(recv_workqueue);
	destroy_workqueue(send_workqueue);
}
static int work_start(void)
{
	int error = -ENOMEM;

	/* create_workqueue() returns NULL on failure, not an ERR_PTR */
	recv_workqueue = create_workqueue("dlm_recv");
	if (!recv_workqueue) {
		log_print("can't start dlm_recv %d", error);
		return error;
	}

	send_workqueue = create_singlethread_workqueue("dlm_send");
	if (!send_workqueue) {
		log_print("can't start dlm_send %d", error);
		destroy_workqueue(recv_workqueue);
		return error;
	}

	return 0;
}
void dlm_lowcomms_stop(void)
{
	int i;

	/* Set all the flags to prevent any
	   socket activity.
	*/
	for (i = 0; i < conn_array_size; i++) {
		if (connections[i])
			connections[i]->flags |= 0xFF;
	}

	work_stop();
	clean_writequeues();

	for (i = 0; i < conn_array_size; i++) {
		if (connections[i]) {
			close_connection(connections[i], true);
			if (connections[i]->othercon)
				kmem_cache_free(con_cache, connections[i]->othercon);
			kmem_cache_free(con_cache, connections[i]);
		}
	}

	kfree(connections);
	connections = NULL;

	kmem_cache_destroy(con_cache);
}
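
/*
 * Shutdown ordering in dlm_lowcomms_stop() above: setting every CF_* bit
 * first (flags |= 0xFF) makes all the test_and_set_bit() fast paths fail,
 * so no new work can be queued while the workqueues are being destroyed;
 * only then are the sockets closed and the connection memory freed.
 */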
/* This is quite likely to sleep... */
int dlm_lowcomms_start(void)
{
	int error = -ENOMEM;

	connections = kzalloc(sizeof(struct connection *) *
			      NODE_INCREMENT, GFP_KERNEL);
	if (!connections)
		goto out;

	conn_array_size = NODE_INCREMENT;

	error = -EINVAL;
	if (dlm_our_addr(&dlm_local_addr, 0)) {
		log_print("no local IP address has been set");
		goto fail_free_conn;
	}
	if (!dlm_our_addr(&dlm_local_addr, 1)) {
		log_print("This dlm comms module does not support multi-homed clustering");
		goto fail_free_conn;
	}

	error = -ENOMEM;
	con_cache = kmem_cache_create("dlm_conn", sizeof(struct connection),
				      __alignof__(struct connection), 0,
				      NULL, NULL);
	if (!con_cache)
		goto fail_free_conn;

	/* Start listening */
	error = listen_for_all();
	if (error)
		goto fail_unlisten;

	error = work_start();
	if (error)
		goto fail_unlisten;

	return 0;

fail_unlisten:
	close_connection(connections[0], false);
	kmem_cache_free(con_cache, connections[0]);
	kmem_cache_destroy(con_cache);

fail_free_conn:
	kfree(connections);

out:
	return error;
}
/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * Emacs will notice this stuff at the end of the file and automatically
 * adjust the settings for this buffer only. This must remain at the end
 * of the file.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-file-style: "linux"
 * End:
 */