/*
 * Copyright (C) 2002-2003 Sistina Software, Inc. All rights reserved.
 * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
 *
 * This file is part of LVM2.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU Lesser General Public License v.2.1.
 *
 * You should have received a copy of the GNU Lesser General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
/*
 * This provides the interface between clvmd and gulm as the cluster
 * and lock manager.
 *
 * It also provides the "liblm" functions as it's hard (and pointless)
 * to separate them out when using gulm.
 *
 * What it does /not/ provide is the communications between clvmd daemons
 * on the cluster nodes. That is done in tcp-comms.c
 */
#include <pthread.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <fcntl.h>
#include <signal.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <syslog.h>
#include <errno.h>
#include <assert.h>
#include <ccs.h>
#include <libgulm.h>
#include <libdevmapper.h>

#include "lvm-logging.h"
#include "clvmd-comms.h"
#include "clvmd.h"
#include "lvm-functions.h"
#include "clvmd-gulm.h"
/* Hash list of nodes in the cluster */
static struct dm_hash_table *node_hash;
/* Hash list of outstanding lock requests */
static struct dm_hash_table *lock_hash;
/* Copy of the current quorate state */
static uint8_t gulm_quorate = 0;
static enum {INIT_NOTDONE, INIT_DONE, INIT_WAITQUORATE} init_state = INIT_NOTDONE;
/* Number of active nodes */
static int num_nodes;

static char *cluster_name;
static int in_shutdown = 0;

static pthread_mutex_t lock_start_mutex;
static volatile int lock_start_flag;
struct node_info {
	enum {NODE_UNKNOWN, NODE_DOWN, NODE_UP, NODE_CLVMD} state;
	char name[GULM_MAX_CLUSTER_MEMBER_NAME_LEN];
};

struct lock_wait {
	pthread_cond_t cond;
	pthread_mutex_t mutex;
	int status;
};
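
/*
 * Locking hand-off (as implemented below): the requesting thread puts a
 * lock_wait on its own stack, registers it in lock_hash keyed by the
 * resource name, issues the asynchronous GULM request and then sleeps on
 * the condition variable.  When the lock-state callback arrives it looks
 * the resource up in lock_hash, stores the error code in 'status' and
 * signals the waiter.
 */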
static int read_from_core_sock(struct local_client *client, char *buf, int len,
			       const char *csid, struct local_client **new_client);
static int read_from_lock_sock(struct local_client *client, char *buf, int len,
			       const char *csid, struct local_client **new_client);
static int get_all_cluster_nodes(void);
static int _csid_from_name(char *csid, const char *name);
static void _cluster_closedown(void);
extern struct dm_hash_table *sock_hash;
static int add_internal_client(int fd, fd_callback_t callback)
{
	struct local_client *client;

	DEBUGLOG("Add_internal_client, fd = %d\n", fd);

	/* Add a GULM file descriptor to the main loop */
	client = malloc(sizeof(struct local_client));
	if (!client)
	{
		DEBUGLOG("malloc failed\n");
		return -1;
	}

	memset(client, 0, sizeof(struct local_client));
	client->fd = fd;
	client->type = CLUSTER_INTERNAL;
	client->callback = callback;
	add_client(client);

	/* Set Close-on-exec */
	fcntl(fd, F_SETFD, 1);

	return 0;
}
/* Gulm library handle */
static gulm_interface_p gulm_if;
static lg_core_callbacks_t core_callbacks;
static lg_lockspace_callbacks_t lock_callbacks;
static void badsig_handler(int sig)
{
	DEBUGLOG("got sig %d\n", sig);
	_cluster_closedown();
	exit(0);
}
static void _reread_config(void)
{
	/* Re-read CCS node list */
	DEBUGLOG("Re-reading CCS config\n");
	get_all_cluster_nodes();
}
static int _init_cluster(void)
{
	int status;
	int ccs_h;
	int port = 0;
	char *portstr;

	/* Get cluster name from CCS */
	ccs_h = ccs_force_connect(NULL, 0);
	if (ccs_h < 0)
	{
		syslog(LOG_ERR, "Cannot log in to CCSD server\n");
		return -1;
	}

	ccs_get(ccs_h, "//cluster/@name", &cluster_name);
	DEBUGLOG("got cluster name %s\n", cluster_name);

	if (!ccs_get(ccs_h, "//cluster/clvm/@port", &portstr))
	{
		port = atoi(portstr);
		free(portstr);
		DEBUGLOG("got port number %d\n", port);

		if (port <= 0 || port >= 65536)
			port = 0;
	}
	ccs_disconnect(ccs_h);

	/* Block locking until we are logged in */
	pthread_mutex_init(&lock_start_mutex, NULL);
	pthread_mutex_lock(&lock_start_mutex);
	lock_start_flag = 1;

	node_hash = dm_hash_create(100);
	lock_hash = dm_hash_create(10);

	/* Get all nodes from CCS */
	if (get_all_cluster_nodes())
		return -1;

	/* Initialise GULM library */
	if ((status = lg_initialize(&gulm_if, cluster_name, "clvmd")))
	{
		DEBUGLOG("lg_initialize failed: %d\n", status);
		return status;
	}

	/* Connect to core - we are not "important" :-) */
	if ((status = lg_core_login(gulm_if, 0)))
	{
		DEBUGLOG("lg_core_login failed: %d\n", status);
		return status;
	}

	/* Initialise the inter-node comms */
	if ((status = init_comms(port)))
		return status;

	/* Add core FD to the list */
	if ((status = add_internal_client(lg_core_selector(gulm_if), read_from_core_sock)))
	{
		DEBUGLOG("can't allocate client space\n");
		return status;
	}

	/* Connect to the lock server */
	if (lg_lock_login(gulm_if, "CLVM"))
	{
		syslog(LOG_ERR, "Cannot log in to LOCK server\n");
		DEBUGLOG("Cannot log in to LOCK server\n");
		return -1;
	}

	/* Add lockspace FD to the list */
	if ((status = add_internal_client(lg_lock_selector(gulm_if), read_from_lock_sock)))
	{
		DEBUGLOG("can't allocate client space\n");
		return status;
	}

	/* Request a list of nodes, we can't really do anything until
	   this comes back */
	if ((status = lg_core_nodelist(gulm_if)))
	{
		DEBUGLOG("lg_core_nodelist failed: %d\n", status);
		return status;
	}

	/* So I can kill it without taking GULM down too */
	signal(SIGINT, badsig_handler);
	signal(SIGTERM, badsig_handler);

	return 0;
}
static void _cluster_closedown(void)
{
	DEBUGLOG("cluster_closedown\n");
	in_shutdown = 1;
	lg_lock_logout(gulm_if);
	lg_core_logout(gulm_if);
}
/* Expire locks for a named node, or us */
#define GIO_KEY_SIZE 46
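/*
 * Note: the all-0xff mask below appears to act as a match-everything key
 * mask for lg_lock_drop_exp(), i.e. every expired lock held by the named
 * node (or by this node when nodename is NULL) is dropped.
 */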
static void drop_expired_locks(char *nodename)
{
	struct utsname nodeinfo;
	uint8_t mask[GIO_KEY_SIZE];

	DEBUGLOG("Dropping expired locks for %s\n", nodename ? nodename : "(null)");
	memset(mask, 0xff, GIO_KEY_SIZE);

	if (!nodename)
	{
		uname(&nodeinfo);
		nodename = nodeinfo.nodename;
	}

	if (lg_lock_drop_exp(gulm_if, nodename, mask, GIO_KEY_SIZE))
	{
		DEBUGLOG("Error calling lg_lock_drop_exp()\n");
	}
}
static int read_from_core_sock(struct local_client *client, char *buf, int len,
			       const char *csid, struct local_client **new_client)
{
	int status;

	status = lg_core_handle_messages(gulm_if, &core_callbacks, NULL);
	return status < 0 ? status : 1;
}
static int read_from_lock_sock(struct local_client *client, char *buf, int len,
			       const char *csid, struct local_client **new_client)
{
	int status;

	status = lg_lock_handle_messages(gulm_if, &lock_callbacks, NULL);
	return status < 0 ? status : 1;
}
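
/*
 * Both readers above are registered with the clvmd main loop via
 * add_internal_client(), so when the GULM core or lockspace socket becomes
 * readable the main loop calls back here and the gulm library dispatches
 * the pending messages to the callback tables defined below.
 */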
/* CORE callback routines */
static int core_login_reply(void *misc, uint64_t gen, uint32_t error, uint32_t rank, uint8_t corestate)
{
	DEBUGLOG("CORE Got a Login reply. gen:%lld err:%d rank:%d corestate:%d\n",
		 gen, error, rank, corestate);

	/* Get the current core state (for quorum) */
	lg_core_corestate(gulm_if);

	return 0;
}
static void set_node_state(struct node_info *ninfo, char *csid, uint8_t nodestate)
{
	if (nodestate == lg_core_Logged_in)
	{
		/* Don't clobber NODE_CLVMD state */
		if (ninfo->state != NODE_CLVMD)
		{
			if (ninfo->state == NODE_UNKNOWN ||
			    ninfo->state == NODE_DOWN)
				num_nodes++;

			ninfo->state = NODE_UP;
		}
	}
	else if (nodestate == lg_core_Expired ||
		 nodestate == lg_core_Fenced ||
		 nodestate == lg_core_Logged_out)
	{
		if (ninfo->state != NODE_DOWN)
			num_nodes--;
		ninfo->state = NODE_DOWN;
	}

	/* Gulm doesn't always send node DOWN events, so even if this is a node UP we must
	 * assume (ahem) that it previously went down at some time. So we close
	 * the sockets here to make sure that we don't have any dead connections
	 * to that node. */
	tcp_remove_client(csid);

	DEBUGLOG("set_node_state, '%s' state = %d num_nodes=%d\n",
		 ninfo->name, ninfo->state, num_nodes);
}
static struct node_info *add_or_set_node(char *name, struct in6_addr *ip, uint8_t state)
{
	struct node_info *ninfo;

	ninfo = dm_hash_lookup_binary(node_hash, (char *)ip, GULM_MAX_CSID_LEN);
	if (!ninfo)
	{
		/* If we can't find that node then re-read the config file in case it
		   was added after we were started */
		DEBUGLOG("Node %s not found, re-reading config file\n", name);
		get_all_cluster_nodes();

		/* Now try again */
		ninfo = dm_hash_lookup_binary(node_hash, (char *)ip, GULM_MAX_CSID_LEN);
		if (!ninfo)
		{
			DEBUGLOG("Ignoring node %s, not part of the SAN cluster\n", name);
			return NULL;
		}
	}

	set_node_state(ninfo, (char *)ip, state);

	return ninfo;
}
static void _get_our_csid(char *csid)
{
	get_our_gulm_csid(csid);
}
static int core_nodelist(void *misc, lglcb_t type, char *name, struct in6_addr *ip, uint8_t state)
{
	DEBUGLOG("CORE nodelist\n");

	if (type == lglcb_start)
	{
		DEBUGLOG("Got Nodelist, start\n");
	}
	else if (type == lglcb_item)
	{
		DEBUGLOG("Got nodelist, item: %s, %#x\n", name, state);
		add_or_set_node(name, ip, state);
	}
	else if (type == lglcb_stop)
	{
		char ourcsid[GULM_MAX_CSID_LEN];

		DEBUGLOG("Got Nodelist, stop\n");
		if (gulm_quorate)
		{
			clvmd_cluster_init_completed();
			init_state = INIT_DONE;
		}
		else if (init_state == INIT_NOTDONE)
		{
			init_state = INIT_WAITQUORATE;
		}

		/* Mark ourself as up */
		_get_our_csid(ourcsid);
		gulm_add_up_node(ourcsid);
	}
	else
	{
		DEBUGLOG("Unknown lglcb_t %#x\n", type);
	}

	return 0;
}
static int core_statechange(void *misc, uint8_t corestate, uint8_t quorate, struct in6_addr *masterip, char *mastername)
{
	DEBUGLOG("CORE Got statechange. quorate:%d, corestate:%x mastername:%s\n",
		 quorate, corestate, mastername);

	gulm_quorate = quorate;
	if (quorate && init_state == INIT_WAITQUORATE)
	{
		clvmd_cluster_init_completed();
		init_state = INIT_DONE;
	}

	return 0;
}
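
/*
 * Cluster-init state machine: we start in INIT_NOTDONE.  When the initial
 * nodelist has been received we either complete initialisation (INIT_DONE)
 * or, if the cluster is not yet quorate, park in INIT_WAITQUORATE;
 * core_statechange() above then finishes the job once quorum arrives.
 */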
static int core_nodechange(void *misc, char *nodename, struct in6_addr *nodeip, uint8_t nodestate)
{
	struct node_info *ninfo;

	DEBUGLOG("CORE node change, name=%s, state = %d\n", nodename, nodestate);

	/* If we don't get nodeip here, try a lookup by name */
	if (!nodeip)
		_csid_from_name((char *)nodeip, nodename);

	ninfo = add_or_set_node(nodename, nodeip, nodestate);
	if (!ninfo)
		return 0;

	/* Check if we need to drop any expired locks */
	if (ninfo->state == NODE_DOWN)
	{
		drop_expired_locks(nodename);
	}

	return 0;
}
static int core_error(void *misc, uint32_t err)
{
	DEBUGLOG("CORE error: %d\n", err);
	// Not sure what happens here
	return 0;
}
/* LOCK callback routines */
static int lock_login_reply(void *misc, uint32_t error, uint8_t which)
{
	DEBUGLOG("LOCK Got a Login reply. err:%d which:%d\n",
		 error, which);

	/* Drop any expired locks for us that might be hanging around */
	drop_expired_locks(NULL);

	/* Enable locking operations in other threads */
	if (lock_start_flag)
	{
		lock_start_flag = 0;
		pthread_mutex_unlock(&lock_start_mutex);
	}

	return 0;
}
static int lock_lock_state(void *misc, uint8_t *key, uint16_t keylen,
			   uint64_t subid, uint64_t start, uint64_t stop,
			   uint8_t state, uint32_t flags, uint32_t error,
			   uint8_t *LVB, uint16_t LVBlen)
{
	struct lock_wait *lwait;

	DEBUGLOG("LOCK lock state: %s, error = %d\n", key, error);

	/* No waiting process to wake up when we are shutting down */
	if (in_shutdown)
		return 0;

	lwait = dm_hash_lookup(lock_hash, (char *)key);
	if (!lwait)
	{
		DEBUGLOG("Can't find hash entry for resource %s\n", key);
		return 0;
	}

	lwait->status = error;
	pthread_mutex_lock(&lwait->mutex);
	pthread_cond_signal(&lwait->cond);
	pthread_mutex_unlock(&lwait->mutex);

	return 0;
}
static int lock_error(void *misc, uint32_t err)
{
	DEBUGLOG("LOCK error: %d\n", err);
	// Not sure what happens here
	return 0;
}
static lg_core_callbacks_t core_callbacks = {
	.login_reply  = core_login_reply,
	.nodelist     = core_nodelist,
	.statechange  = core_statechange,
	.nodechange   = core_nodechange,
	.error        = core_error,
};
static lg_lockspace_callbacks_t lock_callbacks = {
	.login_reply  = lock_login_reply,
	.lock_state   = lock_lock_state,
	.error        = lock_error,
};
/* Allow tcp-comms to loop round the list of active nodes */
int get_next_node_csid(void **context, char *csid)
{
	struct node_info *ninfo = NULL;

	/* First node */
	if (!*context)
		*context = dm_hash_get_first(node_hash);
	else
		*context = dm_hash_get_next(node_hash, *context);

	if (*context)
		ninfo = dm_hash_get_data(node_hash, *context);

	/* Find a node that is UP */
	while (*context && ninfo->state == NODE_DOWN)
	{
		*context = dm_hash_get_next(node_hash, *context);
		if (*context)
			ninfo = dm_hash_get_data(node_hash, *context);
	}

	if (!*context || ninfo->state == NODE_DOWN)
		return 0;

	memcpy(csid, dm_hash_get_key(node_hash, *context), GULM_MAX_CSID_LEN);
	return 1;
}
int gulm_name_from_csid(const char *csid, char *name)
{
	struct node_info *ninfo;

	ninfo = dm_hash_lookup_binary(node_hash, csid, GULM_MAX_CSID_LEN);
	if (!ninfo)
	{
		sprintf(name, "UNKNOWN %s", print_csid(csid));
		return -1;
	}

	strcpy(name, ninfo->name);
	return 0;
}
static int _csid_from_name(char *csid, const char *name)
{
	struct dm_hash_node *hn;
	struct node_info *ninfo;

	dm_hash_iterate(hn, node_hash)
	{
		ninfo = dm_hash_get_data(node_hash, hn);
		if (strcmp(ninfo->name, name) == 0)
		{
			memcpy(csid, dm_hash_get_key(node_hash, hn), GULM_MAX_CSID_LEN);
			return 0;
		}
	}
	return -1;
}
static int _get_num_nodes()
{
	DEBUGLOG("num_nodes = %d\n", num_nodes);
	return num_nodes;
}
/* Node is now known to be running a clvmd */
void gulm_add_up_node(const char *csid)
{
	struct node_info *ninfo;

	ninfo = dm_hash_lookup_binary(node_hash, csid, GULM_MAX_CSID_LEN);
	if (!ninfo)
	{
		DEBUGLOG("gulm_add_up_node no node_hash entry for csid %s\n", print_csid(csid));
		return;
	}

	DEBUGLOG("gulm_add_up_node %s\n", ninfo->name);

	if (ninfo->state == NODE_DOWN)
		num_nodes++;
	ninfo->state = NODE_CLVMD;
}
/* Node is now known to be NOT running a clvmd */
void add_down_node(char *csid)
{
	struct node_info *ninfo;

	ninfo = dm_hash_lookup_binary(node_hash, csid, GULM_MAX_CSID_LEN);
	if (!ninfo)
		return;

	/* Only set it to UP if it was previously known to be
	   running clvmd - gulm may set it DOWN quite soon */
	if (ninfo->state == NODE_CLVMD)
		ninfo->state = NODE_UP;
	drop_expired_locks(ninfo->name);
}
/* Call a callback for each node, so the caller knows whether it's up or down */
static int _cluster_do_node_callback(struct local_client *master_client,
				     void (*callback)(struct local_client *, const char *csid, int node_up))
{
	struct dm_hash_node *hn;
	struct node_info *ninfo;
	int somedown = 0;

	dm_hash_iterate(hn, node_hash)
	{
		char csid[GULM_MAX_CSID_LEN];
		struct local_client *client;

		ninfo = dm_hash_get_data(node_hash, hn);
		memcpy(csid, dm_hash_get_key(node_hash, hn), GULM_MAX_CSID_LEN);

		DEBUGLOG("down_callback. node %s, state = %d\n", ninfo->name, ninfo->state);

		client = dm_hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
		if (!client)
		{
			/* If it's up but not connected, try to make contact */
			if (ninfo->state == NODE_UP)
				gulm_connect_csid(csid, &client);

			client = dm_hash_lookup_binary(sock_hash, csid, GULM_MAX_CSID_LEN);
		}

		DEBUGLOG("down_callback2. node %s, state = %d\n", ninfo->name, ninfo->state);
		if (ninfo->state != NODE_DOWN)
			callback(master_client, csid, ninfo->state == NODE_CLVMD);
		if (ninfo->state != NODE_CLVMD)
			somedown = -1;
	}
	return somedown;
}
/* Convert gulm error codes to unix errno numbers */
static int gulm_to_errno(int gulm_ret)
{
	switch (gulm_ret)
	{
	case lg_err_TryFailed:
	case lg_err_AlreadyPend:
		errno = EAGAIN;
		break;
	}

	return gulm_ret ? -1 : 0;
}
static int _lock_resource(char *resource, int mode, int flags, int *lockid)
{
	int status;
	struct lock_wait lwait;

	/* Wait until the lock module is ready */
	if (lock_start_flag)
	{
		pthread_mutex_lock(&lock_start_mutex);
		pthread_mutex_unlock(&lock_start_mutex);
	}

	pthread_cond_init(&lwait.cond, NULL);
	pthread_mutex_init(&lwait.mutex, NULL);
	pthread_mutex_lock(&lwait.mutex);

	/* This needs to be converted from DLM/LVM2 value for GULM */
	if (flags & LKF_NOQUEUE) flags = lg_lock_flag_Try;

	dm_hash_insert(lock_hash, resource, &lwait);
	DEBUGLOG("lock_resource '%s', flags=%d, mode=%d\n", resource, flags, mode);

	status = lg_lock_state_req(gulm_if, resource, strlen(resource)+1,
				   0, 0, 0,
				   mode, flags, NULL, 0);
	if (status)
	{
		DEBUGLOG("lg_lock_state returned %d\n", status);
		return status;
	}

	/* Wait for it to complete */
	pthread_cond_wait(&lwait.cond, &lwait.mutex);
	pthread_mutex_unlock(&lwait.mutex);

	dm_hash_remove(lock_hash, resource);
	DEBUGLOG("lock-resource returning %d\n", lwait.status);

	return gulm_to_errno(lwait.status);
}
static int _unlock_resource(char *resource, int lockid)
{
	int status;
	struct lock_wait lwait;

	pthread_cond_init(&lwait.cond, NULL);
	pthread_mutex_init(&lwait.mutex, NULL);
	pthread_mutex_lock(&lwait.mutex);

	dm_hash_insert(lock_hash, resource, &lwait);
	DEBUGLOG("unlock_resource %s\n", resource);

	status = lg_lock_state_req(gulm_if, resource, strlen(resource)+1,
				   0, 0, 0,
				   lg_lock_state_Unlock, 0, NULL, 0);
	if (status)
	{
		DEBUGLOG("lg_lock_state(unlock) returned %d\n", status);
		return status;
	}

	/* When we are shutting down, don't wait for unlocks
	   to be acknowledged, just do it. */
	if (in_shutdown)
		return status;

	/* Wait for it to complete */
	pthread_cond_wait(&lwait.cond, &lwait.mutex);
	pthread_mutex_unlock(&lwait.mutex);

	dm_hash_remove(lock_hash, resource);

	return gulm_to_errno(lwait.status);
}
/* These two locking functions MUST be called in a separate thread from
   the clvmd main loop because they expect to be woken up by it.

   These are abstractions around the real locking functions (above)
   as we need to emulate the DLM's EX/PW/CW interaction with GULM using
   two GULM locks per resource.

   To aid unlocking, we store the lock mode in the lockid (as GULM
   doesn't use it).
*/
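/*
 * Rough mode mapping (see _sync_lock() below): LCK_EXCL takes both
 * "<resource>-1" and "<resource>-2" exclusively; LCK_READ/LCK_PREAD hold
 * only "<resource>-1" shared; LCK_WRITE holds only "<resource>-2"
 * exclusively.  So an exclusive holder conflicts with every other mode,
 * writers serialise against each other on "-2", and readers share "-1".
 */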
static int _sync_lock(const char *resource, int mode, int flags, int *lockid)
{
	int status = -1;
	char lock1[strlen(resource)+3];
	char lock2[strlen(resource)+3];

	snprintf(lock1, sizeof(lock1), "%s-1", resource);
	snprintf(lock2, sizeof(lock2), "%s-2", resource);

	switch (mode)
	{
	case LCK_EXCL:
		status = _lock_resource(lock1, lg_lock_state_Exclusive, flags, lockid);
		if (status)
			break;

		/* If we can't get this lock too then bail out */
		status = _lock_resource(lock2, lg_lock_state_Exclusive, LCK_NONBLOCK, lockid);
		if (status == lg_err_TryFailed)
			_unlock_resource(lock1, *lockid);
		break;

	case LCK_PREAD:
	case LCK_READ:
		status = _lock_resource(lock1, lg_lock_state_Shared, flags, lockid);
		if (!status)
			status = _unlock_resource(lock2, *lockid);
		break;

	case LCK_WRITE:
		status = _lock_resource(lock2, lg_lock_state_Exclusive, flags, lockid);
		if (!status)
			status = _unlock_resource(lock1, *lockid);
		break;
	}

	/* Remember the held mode so _sync_unlock() knows what to release */
	*lockid = mode;

	return status;
}
static int _sync_unlock(const char *resource, int lockid)
{
	int status;
	char lock1[strlen(resource)+3];
	char lock2[strlen(resource)+3];

	snprintf(lock1, sizeof(lock1), "%s-1", resource);
	snprintf(lock2, sizeof(lock2), "%s-2", resource);

	/* The held lock mode is in the lock id */
	assert(lockid == LCK_EXCL ||
	       lockid == LCK_READ ||
	       lockid == LCK_PREAD ||
	       lockid == LCK_WRITE);

	status = _unlock_resource(lock1, lockid);
	if (!status)
		status = _unlock_resource(lock2, lockid);

	return status;
}
static int _is_quorate()
{
	return gulm_quorate;
}
/* Get all the cluster node names & IPs from CCS and
   add them to our node list so we know who to talk to.
   Called when we start up and if we get sent SIGHUP.
*/
static int get_all_cluster_nodes()
{
	int ctree;
	char *nodename;
	int error;
	int i;

	/* Open the config file */
	ctree = ccs_force_connect(NULL, 1);
	if (ctree < 0)
	{
		log_error("Error connecting to CCS");
		return -1;
	}

	for (i = 1;; i++)
	{
		char nodekey[256];
		char key[256];
		char nodeip[GULM_MAX_CSID_LEN];
		char *clvmflagstr;
		int clvmflag = 1;

		sprintf(nodekey, "//cluster/clusternodes/clusternode[%d]/@name", i);
		error = ccs_get(ctree, nodekey, &nodename);
		if (error)
			break;

		sprintf(key, "//cluster/clusternodes/clusternode[@name=\"%s\"]/clvm", nodename);
		if (!ccs_get(ctree, key, &clvmflagstr))
		{
			clvmflag = atoi(clvmflagstr);
			free(clvmflagstr);
		}

		DEBUGLOG("Got node %s from ccs (clvmflag = %d)\n", nodename, clvmflag);
		if ((get_ip_address(nodename, nodeip) == 0) && clvmflag)
		{
			struct node_info *ninfo;

			/* If it's not in the list, then add it */
			ninfo = dm_hash_lookup_binary(node_hash, nodeip, GULM_MAX_CSID_LEN);
			if (!ninfo)
			{
				ninfo = malloc(sizeof(struct node_info));
				if (!ninfo)
				{
					syslog(LOG_ERR, "Cannot alloc memory for node info\n");
					ccs_disconnect(ctree);
					return -1;
				}
				strcpy(ninfo->name, nodename);

				ninfo->state = NODE_DOWN;
				dm_hash_insert_binary(node_hash, nodeip, GULM_MAX_CSID_LEN, ninfo);
			}
		}
		else if (!clvmflag)
		{
			DEBUGLOG("node %s has clvm disabled\n", nodename);
		}
		else
		{
			DEBUGLOG("Cannot resolve host name %s\n", nodename);
			log_error("Cannot resolve host name %s\n", nodename);
		}
		free(nodename);
	}

	/* Finished with config file */
	ccs_disconnect(ctree);

	return 0;
}
static int _get_main_cluster_fd(void)
{
	return get_main_gulm_cluster_fd();
}
static int _cluster_fd_callback(struct local_client *fd, char *buf, int len,
				const char *csid, struct local_client **new_client)
{
	return cluster_fd_gulm_callback(fd, buf, len, csid, new_client);
}
static int _cluster_send_message(const void *buf, int msglen, const char *csid, const char *errtext)
{
	return gulm_cluster_send_message((char *)buf, msglen, csid, errtext);
}
static int _get_cluster_name(char *buf, int buflen)
{
	strncpy(buf, cluster_name, buflen);
	return 0;
}
static struct cluster_ops _cluster_gulm_ops = {
	.cluster_init_completed   = NULL,
	.cluster_send_message     = _cluster_send_message,
	.name_from_csid           = gulm_name_from_csid,
	.csid_from_name           = _csid_from_name,
	.get_num_nodes            = _get_num_nodes,
	.cluster_fd_callback      = _cluster_fd_callback,
	.get_main_cluster_fd      = _get_main_cluster_fd,
	.cluster_do_node_callback = _cluster_do_node_callback,
	.is_quorate               = _is_quorate,
	.get_our_csid             = _get_our_csid,
	.add_up_node              = gulm_add_up_node,
	.reread_config            = _reread_config,
	.cluster_closedown        = _cluster_closedown,
	.get_cluster_name         = _get_cluster_name,
	.sync_lock                = _sync_lock,
	.sync_unlock              = _sync_unlock,
};
struct cluster_ops *init_gulm_cluster(void)
{
	if (!_init_cluster())
		return &_cluster_gulm_ops;
	else
		return NULL;
}
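
/*
 * Usage sketch (not part of this file): clvmd's core is expected to call
 * init_gulm_cluster() at startup and drive everything through the returned
 * ops table, e.g.
 *
 *	struct cluster_ops *ops = init_gulm_cluster();
 *	if (ops) {
 *		ops->get_cluster_name(namebuf, sizeof(namebuf));
 *		ops->sync_lock(resource, mode, flags, &lockid);
 *		ops->sync_unlock(resource, lockid);
 *	}
 */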