/*
  Copyright Red Hat, Inc. 2003

  The Red Hat Cluster Manager API Library is free software; you can
  redistribute it and/or modify it under the terms of the GNU Lesser
  General Public License as published by the Free Software Foundation;
  either version 2.1 of the License, or (at your option) any later
  version.

  The Red Hat Cluster Manager API Library is distributed in the hope
  that it will be useful, but WITHOUT ANY WARRANTY; without even the
  implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
  PURPOSE.  See the GNU Lesser General Public License for more details.

  You should have received a copy of the GNU Lesser General Public
  License along with this library; if not, write to the Free Software
  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
  USA.
 */
/**
 * Quorum API functions.
 */
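/*
 * The original include list is not shown here; these are the system
 * headers the visible code plausibly needs.  The project-local header(s)
 * declaring cm_event_t, cm_event_hdr_t, tcp_localconnect(), tcp_send(),
 * tcp_receive(), and the swab_* helpers are assumed but not named.
 */
#include <pthread.h>    /* PTHREAD_MUTEX_INITIALIZER */
#include <stdlib.h>     /* malloc(), free() */
#include <unistd.h>     /* close() */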
/* Member ID of the local node, cached after the first successful query. */
static int __local_member_id = -1;
static pthread_mutex_t __local_member_id_lock = PTHREAD_MUTEX_INITIALIZER;
/**
 * Register for quorum and/or quorum-proxied membership events.
 *
 * @return File descriptor, or -1 if clumembd couldn't be contacted.
 */
        /*
         * Talks to the local node.  If the user hasn't read the configuration
         * file, it talks to the loopback interface, since the quorum
         * daemon will also be listening on it.
         */
        fd = tcp_localconnect(CM_QUORUM_PORT);
        msg.eh_magic = CLUSTER_MAGIC;
        msg.eh_type = EV_REGISTER;
        msg.eh_length = sizeof(msg);

        /* Byte-swap the header into wire order before sending. */
        swab_cm_event_hdr_t(&msg);
        if (tcp_send(fd, &msg, sizeof(msg)) != sizeof(msg)) {
                /* Assumed error path: give up on a short send. */
                close(fd);
                return -1;
        }
        if (tcp_receive(fd, (void *)&msg, sizeof(msg)) !=
            sizeof(msg)) {
                /* Assumed error path: give up on a short read. */
                close(fd);
                return -1;
        }

        /* Byte-swap the reply back into host order. */
        swab_cm_event_hdr_t(&msg);
        if (msg.eh_type != EV_ACK) {
                /* Assumed error path: registration was not acknowledged. */
                close(fd);
                return -1;
        }

        return fd;
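/*
 * Illustrative sketch, not part of the original library: one way a
 * caller might consume the registration call above.  The entry-point
 * name "quorum_register" is an assumption (the signature is not shown
 * here); the contract -- a file descriptor, or -1 -- is from the
 * comment above.
 */
#ifdef QUORUM_API_EXAMPLE
#include <sys/select.h>

static int
example_wait_for_quorum_event(void)
{
        fd_set rfds;
        int fd;

        fd = quorum_register();   /* assumed name; returns an fd or -1 */
        if (fd < 0)
                return -1;

        FD_ZERO(&rfds);
        FD_SET(fd, &rfds);

        /* Block until the quorum daemon pushes an event at us. */
        if (select(fd + 1, &rfds, NULL, NULL, NULL) < 0) {
                close(fd);
                return -1;
        }

        /* An event is now readable on fd. */
        return fd;
}
#endif /* QUORUM_API_EXAMPLE */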
/**
 * Query the quorum daemon synchronously for the current status of
 * quorum.  This ALWAYS talks to the local node, so running it from a
 * remote node is not possible.  This does not require the caller to know
 * anything about the cluster configuration.
 *
 * @param result	Unallocated space which will be allocated within
 *			and returned to the user.
 * @return		-1 on failure, 0 on success.
 */
int
quorum_query(cm_event_t **result)
{
        /* Declarations inferred from use; the message type is assumed from
           swab_cm_event_hdr_t(), and "counter" is assumed scratch for the
           swab macro below. */
        cm_event_t *view;
        cm_event_hdr_t msg;
        int fd, counter;
        /*
         * Talks to the local node.  If the user hasn't read the configuration
         * file, it talks to the loopback interface, since the quorum
         * daemon will also be listening on it.
         */
        fd = tcp_localconnect(34003);
        msg.eh_magic = CLUSTER_MAGIC;
        msg.eh_type = QUORUM_QUERY;
        msg.eh_length = sizeof(msg);

        swab_cm_event_hdr_t(&msg);
        if (tcp_send(fd, &msg, sizeof(msg)) != sizeof(msg)) {
                /* Assumed error path: give up on a short send. */
                close(fd);
                return -1;
        }
        *result = malloc(sizeof(*view));
        if (*result == NULL) {
                /* Assumed error path: allocation failure. */
                close(fd);
                return -1;
        }
        view = (cm_event_t *)*result;
        if (tcp_receive(fd, (void *)view, sizeof(*view)) !=
            sizeof(*view)) {
                /* Assumed error path: short read; hand nothing back. */
                free(*result);
                *result = NULL;
                close(fd);
                return -1;
        }
        swab_cm_event_hdr_t(&view->em_header);
        swab_cm_quorum_event_t(&view->u.ev_quorum, counter);
        /* Cache the local member ID the first time it is seen. */
        pthread_mutex_lock(&__local_member_id_lock);
        if (__local_member_id == -1)
                __local_member_id = cm_ev_memberid(view);
        pthread_mutex_unlock(&__local_member_id_lock);

        /* Assumed: one-shot query, so drop the connection and succeed. */
        close(fd);
        return 0;
}
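/*
 * Illustrative sketch, not part of the original library: quorum_query()
 * is handed an unallocated pointer and, on success, the caller owns the
 * malloc()ed event and must free it.  cm_ev_memberid() appears in the
 * code above; no other accessors are assumed.
 */
#ifdef QUORUM_API_EXAMPLE
#include <stdio.h>

static int
example_print_local_member(void)
{
        cm_event_t *ev = NULL;

        if (quorum_query(&ev) < 0)
                return -1;

        printf("Local member ID: %d\n", cm_ev_memberid(ev));

        free(ev);       /* the caller owns the event */
        return 0;
}
#endif /* QUORUM_API_EXAMPLE */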
/**
 * Query the quorum daemon synchronously for the current status of the
 * quorum tiebreaker.  This ALWAYS talks to the local node, so running it
 * from a remote node is not possible.  This does not require the caller
 * to know anything about the cluster configuration.
 *
 * @param result	Unallocated space which will be allocated within
 *			and returned to the user.
 * @return		-1 on failure, 0 on success.
 */
int
quorum_query_tb(cm_event_t **result)
{
        /* Declarations inferred from use, as in quorum_query() above. */
        cm_event_t *view;
        cm_event_hdr_t msg;
        int fd;
        /*
         * Talks to the local node.  If the user hasn't read the configuration
         * file, it talks to the loopback interface, since the quorum
         * daemon will also be listening on it.
         */
        fd = tcp_localconnect(34003);
        msg.eh_magic = CLUSTER_MAGIC;
        msg.eh_type = QUORUM_QUERY_TB;
        msg.eh_length = sizeof(msg);

        swab_cm_event_hdr_t(&msg);
        if (tcp_send(fd, &msg, sizeof(msg)) != sizeof(msg)) {
                /* Assumed error path: give up on a short send. */
                close(fd);
                return -1;
        }
        *result = malloc(sizeof(*view));
        if (*result == NULL) {
                /* Assumed error path: allocation failure. */
                close(fd);
                return -1;
        }
        view = (cm_event_t *)*result;
        if (tcp_receive(fd, (void *)view, sizeof(view->em_header)) !=
            sizeof(view->em_header)) {
                /* Assumed error path: short read; hand nothing back. */
                free(*result);
                *result = NULL;
                close(fd);
                return -1;
        }

        swab_cm_event_hdr_t(&view->em_header);
        /* Cache the local member ID the first time it is seen. */
        pthread_mutex_lock(&__local_member_id_lock);
        if (__local_member_id == -1)
                __local_member_id = cm_ev_memberid(view);
        pthread_mutex_unlock(&__local_member_id_lock);

        /* Assumed: one-shot query, so drop the connection and succeed. */
        close(fd);
        return 0;
}
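/*
 * Illustrative sketch, not part of the original library: the tiebreaker
 * query follows the same ownership contract as quorum_query().  How the
 * tiebreaker status is encoded in the returned event is not shown above,
 * so this sketch only reports whether the query itself succeeded.
 */
#ifdef QUORUM_API_EXAMPLE
static int
example_tiebreaker_query_ok(void)
{
        cm_event_t *ev = NULL;

        if (quorum_query_tb(&ev) < 0)
                return 0;       /* query failed */

        free(ev);               /* caller owns the event on success */
        return 1;               /* query succeeded */
}
#endif /* QUORUM_API_EXAMPLE */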