/*
   ctdb_call protocol code

   Copyright (C) Andrew Tridgell  2006

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
/*
  see http://wiki.samba.org/index.php/Samba_%26_Clustering for
  protocol design and packet details
*/
24 #include "system/network.h"
25 #include "system/filesys.h"
30 #include "lib/util/dlinklist.h"
31 #include "lib/util/debug.h"
32 #include "lib/util/samba_util.h"
33 #include "lib/util/sys_rw.h"
34 #include "lib/util/util_process.h"
36 #include "ctdb_private.h"
37 #include "ctdb_client.h"
39 #include "common/rb_tree.h"
40 #include "common/reqid.h"
41 #include "common/system.h"
42 #include "common/common.h"
43 #include "common/logging.h"
44 #include "common/hash_count.h"
struct ctdb_sticky_record {
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;

	/* while this context is non-NULL, migration requests for the
	   record are deferred; it is freed by the pindown timeout */
	void *pindown;
};
/*
  find the ctdb_db from a db index
 */
struct ctdb_db_context *find_ctdb_db(struct ctdb_context *ctdb, uint32_t id)
{
	struct ctdb_db_context *ctdb_db;

	for (ctdb_db = ctdb->db_list; ctdb_db; ctdb_db = ctdb_db->next) {
		if (ctdb_db->db_id == id) {
			break;
		}
	}
	return ctdb_db;
}

/*
  a variant of input packet that can be used in lock requeue
*/
static void ctdb_call_input_pkt(void *p, struct ctdb_req_header *hdr)
{
	struct ctdb_context *ctdb = talloc_get_type(p, struct ctdb_context);
	ctdb_input_pkt(ctdb, hdr);
}
static void ctdb_send_error(struct ctdb_context *ctdb,
			    struct ctdb_req_header *hdr, uint32_t status,
			    const char *fmt, ...) PRINTF_ATTRIBUTE(4,5);
static void ctdb_send_error(struct ctdb_context *ctdb,
			    struct ctdb_req_header *hdr, uint32_t status,
			    const char *fmt, ...)
{
	va_list ap;
	struct ctdb_reply_error_old *r;
	char *msg;
	int msglen, len;

	if (ctdb->methods == NULL) {
		DEBUG(DEBUG_INFO,(__location__ " Failed to send error. Transport is DOWN\n"));
		return;
	}

	va_start(ap, fmt);
	msg = talloc_vasprintf(ctdb, fmt, ap);
	if (msg == NULL) {
		ctdb_fatal(ctdb, "Unable to allocate error in ctdb_send_error\n");
	}
	va_end(ap);

	msglen = strlen(msg)+1;
	len = offsetof(struct ctdb_reply_error_old, msg);
	r = ctdb_transport_allocate(ctdb, msg, CTDB_REPLY_ERROR, len + msglen,
				    struct ctdb_reply_error_old);
	CTDB_NO_MEMORY_FATAL(ctdb, r);

	r->hdr.destnode  = hdr->srcnode;
	r->hdr.reqid     = hdr->reqid;
	r->status        = status;
	r->msglen        = msglen;
	memcpy(&r->msg[0], msg, msglen);

	ctdb_queue_packet(ctdb, &r->hdr);

	talloc_free(msg);
}
/**
 * send a redirect reply
 *
 * The logic behind this function is this:
 *
 * A client wants to grab a record and sends a CTDB_REQ_CALL packet
 * to its local ctdb (ctdb_request_call). If the node is not itself
 * the record's DMASTER, it first redirects the packet to the
 * record's LMASTER. The LMASTER then redirects the call packet to
 * the current DMASTER. Note that this works because of this: When
 * a record is migrated off a node, then the new DMASTER is stored
 * in the record's copy on the former DMASTER.
 */
static void ctdb_call_send_redirect(struct ctdb_context *ctdb,
				    struct ctdb_db_context *ctdb_db,
				    TDB_DATA key,
				    struct ctdb_req_call_old *c,
				    struct ctdb_ltdb_header *header)
{
	uint32_t lmaster = ctdb_lmaster(ctdb, &key);

	c->hdr.destnode = lmaster;
	if (ctdb->pnn == lmaster) {
		c->hdr.destnode = header->dmaster;
	}
	c->hopcount++;

	if (c->hopcount%100 > 95) {
		DEBUG(DEBUG_WARNING,("High hopcount %d dbid:%s "
			"key:0x%08x reqid=%08x pnn:%d src:%d lmaster:%d "
			"header->dmaster:%d dst:%d\n",
			c->hopcount, ctdb_db->db_name, ctdb_hash(&key),
			c->hdr.reqid, ctdb->pnn, c->hdr.srcnode, lmaster,
			header->dmaster, c->hdr.destnode));
	}

	ctdb_queue_packet(ctdb, &c->hdr);
}
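
/*
 * Illustration only (a hypothetical helper, not used by ctdb): the
 * destination selection above reduces to
 *
 *	static uint32_t redirect_destnode(uint32_t self, uint32_t lmaster,
 *					  uint32_t dmaster)
 *	{
 *		return (self == lmaster) ? dmaster : lmaster;
 *	}
 *
 * i.e. a non-lmaster node forwards the call to the lmaster, and the
 * lmaster (which always knows the current dmaster) forwards it to the
 * dmaster, so in the absence of concurrent migrations a call needs at
 * most two redirects.
 */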
/*
  caller must have the chainlock before calling this routine. Caller must be
  the lmaster
*/
static void ctdb_send_dmaster_reply(struct ctdb_db_context *ctdb_db,
				    struct ctdb_ltdb_header *header,
				    TDB_DATA key, TDB_DATA data,
				    uint32_t new_dmaster,
				    uint32_t reqid)
{
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_reply_dmaster_old *r;
	int ret, len;
	TALLOC_CTX *tmp_ctx;

	if (ctdb->pnn != ctdb_lmaster(ctdb, &key)) {
		DEBUG(DEBUG_ALERT,(__location__ " Caller is not lmaster!\n"));
		return;
	}

	header->dmaster = new_dmaster;
	ret = ctdb_ltdb_store(ctdb_db, key, header, data);
	if (ret != 0) {
		ctdb_fatal(ctdb, "ctdb_send_dmaster_reply unable to update dmaster");
		return;
	}

	if (ctdb->methods == NULL) {
		ctdb_fatal(ctdb, "ctdb_send_dmaster_reply can't update dmaster since transport is down");
		return;
	}

	/* put the packet on a temporary context, allowing us to safely free
	   it below even if ctdb_reply_dmaster() has freed it already */
	tmp_ctx = talloc_new(ctdb);

	/* send the CTDB_REPLY_DMASTER */
	len = offsetof(struct ctdb_reply_dmaster_old, data) + key.dsize + data.dsize + sizeof(uint32_t);
	r = ctdb_transport_allocate(ctdb, tmp_ctx, CTDB_REPLY_DMASTER, len,
				    struct ctdb_reply_dmaster_old);
	CTDB_NO_MEMORY_FATAL(ctdb, r);

	r->hdr.destnode   = new_dmaster;
	r->hdr.reqid      = reqid;
	r->hdr.generation = ctdb_db->generation;
	r->rsn            = header->rsn;
	r->keylen         = key.dsize;
	r->datalen        = data.dsize;
	r->db_id          = ctdb_db->db_id;
	memcpy(&r->data[0], key.dptr, key.dsize);
	memcpy(&r->data[key.dsize], data.dptr, data.dsize);
	memcpy(&r->data[key.dsize+data.dsize], &header->flags, sizeof(uint32_t));

	ctdb_queue_packet(ctdb, &r->hdr);

	talloc_free(tmp_ctx);
}
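
/*
 * Sketch (illustration only, not a function in this file) of how the
 * variable-length tail of the dmaster packets built above and below is
 * laid out: the key bytes, then the data bytes, then the 32-bit record
 * flags.
 *
 *	static size_t pack_dmaster_tail(uint8_t *buf, TDB_DATA key,
 *					TDB_DATA data, uint32_t flags)
 *	{
 *		memcpy(buf, key.dptr, key.dsize);
 *		memcpy(buf + key.dsize, data.dptr, data.dsize);
 *		memcpy(buf + key.dsize + data.dsize, &flags, sizeof(flags));
 *		return key.dsize + data.dsize + sizeof(flags);
 *	}
 *
 * This is why the packet length is computed as
 * offsetof(..., data) + key.dsize + data.dsize + sizeof(uint32_t), and
 * why the receiver can split the tail again using keylen and datalen.
 */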
/*
  send a dmaster request (give another node the dmaster for a record)

  This is always sent to the lmaster, which ensures that the lmaster
  always knows who the dmaster is. The lmaster will then send a
  CTDB_REPLY_DMASTER to the new dmaster
*/
static void ctdb_call_send_dmaster(struct ctdb_db_context *ctdb_db,
				   struct ctdb_req_call_old *c,
				   struct ctdb_ltdb_header *header,
				   TDB_DATA *key, TDB_DATA *data)
{
	struct ctdb_req_dmaster_old *r;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int len;
	uint32_t lmaster = ctdb_lmaster(ctdb, key);

	if (ctdb->methods == NULL) {
		ctdb_fatal(ctdb, "Failed ctdb_call_send_dmaster since transport is down");
		return;
	}

	if (data->dsize != 0) {
		header->flags |= CTDB_REC_FLAG_MIGRATED_WITH_DATA;
	}

	if (lmaster == ctdb->pnn) {
		ctdb_send_dmaster_reply(ctdb_db, header, *key, *data,
					c->hdr.srcnode, c->hdr.reqid);
		return;
	}

	len = offsetof(struct ctdb_req_dmaster_old, data) + key->dsize + data->dsize
			+ sizeof(uint32_t);
	r = ctdb_transport_allocate(ctdb, ctdb, CTDB_REQ_DMASTER, len,
				    struct ctdb_req_dmaster_old);
	CTDB_NO_MEMORY_FATAL(ctdb, r);
	r->hdr.destnode   = lmaster;
	r->hdr.reqid      = c->hdr.reqid;
	r->hdr.generation = ctdb_db->generation;
	r->db_id          = ctdb_db->db_id;
	r->rsn            = header->rsn;
	r->dmaster        = c->hdr.srcnode;
	r->keylen         = key->dsize;
	r->datalen        = data->dsize;
	memcpy(&r->data[0], key->dptr, key->dsize);
	memcpy(&r->data[key->dsize], data->dptr, data->dsize);
	memcpy(&r->data[key->dsize + data->dsize], &header->flags, sizeof(uint32_t));

	header->dmaster = c->hdr.srcnode;
	if (ctdb_ltdb_store(ctdb_db, *key, header, *data) != 0) {
		ctdb_fatal(ctdb, "Failed to store record in ctdb_call_send_dmaster");
	}

	ctdb_queue_packet(ctdb, &r->hdr);
}
static void ctdb_sticky_pindown_timeout(struct tevent_context *ev,
					struct tevent_timer *te,
					struct timeval t, void *private_data)
{
	struct ctdb_sticky_record *sr = talloc_get_type(private_data,
						       struct ctdb_sticky_record);

	DEBUG(DEBUG_ERR,("Pindown timeout db:%s  unstick record\n", sr->ctdb_db->db_name));
	if (sr->pindown != NULL) {
		talloc_free(sr->pindown);
		sr->pindown = NULL;
	}
}
static int
ctdb_set_sticky_pindown(struct ctdb_context *ctdb, struct ctdb_db_context *ctdb_db, TDB_DATA key)
{
	TALLOC_CTX *tmp_ctx = talloc_new(NULL);
	uint32_t *k;
	struct ctdb_sticky_record *sr;

	k = ctdb_key_to_idkey(tmp_ctx, key);
	if (k == NULL) {
		DEBUG(DEBUG_ERR,("Failed to allocate key for sticky record\n"));
		talloc_free(tmp_ctx);
		return -1;
	}

	sr = trbt_lookuparray32(ctdb_db->sticky_records, k[0], &k[0]);
	if (sr == NULL) {
		talloc_free(tmp_ctx);
		return 0;
	}

	talloc_free(tmp_ctx);

	if (sr->pindown == NULL) {
		DEBUG(DEBUG_ERR,("Pinning down record in %s for %d ms\n", ctdb_db->db_name, ctdb->tunable.sticky_pindown));
		sr->pindown = talloc_new(sr);
		if (sr->pindown == NULL) {
			DEBUG(DEBUG_ERR,("Failed to allocate pindown context for sticky record\n"));
			return -1;
		}
		tevent_add_timer(ctdb->ev, sr->pindown,
				 timeval_current_ofs(ctdb->tunable.sticky_pindown / 1000,
						     (ctdb->tunable.sticky_pindown * 1000) % 1000000),
				 ctdb_sticky_pindown_timeout, sr);
	}

	return 0;
}
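
/*
 * Note on the timer arithmetic above: sticky_pindown is a tunable in
 * milliseconds, while timeval_current_ofs() takes seconds and
 * microseconds.  A hypothetical standalone conversion would be
 *
 *	static struct timeval pindown_ofs(uint32_t ms)
 *	{
 *		return timeval_current_ofs(ms / 1000, (ms * 1000) % 1000000);
 *	}
 *
 * e.g. 1500 ms becomes an offset of 1 second and 500000 microseconds.
 */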
/*
  called when a CTDB_REPLY_DMASTER packet comes in, or when the lmaster
  gets a CTDB_REQUEST_DMASTER for itself. We become the dmaster.

  must be called with the chainlock held. This function releases the chainlock
*/
static void ctdb_become_dmaster(struct ctdb_db_context *ctdb_db,
				struct ctdb_req_header *hdr,
				TDB_DATA key, TDB_DATA data,
				uint64_t rsn, uint32_t record_flags)
{
	struct ctdb_call_state *state;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_ltdb_header header;
	int ret;

	DEBUG(DEBUG_DEBUG,("pnn %u dmaster response %08x\n", ctdb->pnn, ctdb_hash(&key)));

	ZERO_STRUCT(header);
	header.rsn = rsn;
	header.dmaster = ctdb->pnn;
	header.flags = record_flags;

	state = reqid_find(ctdb->idr, hdr->reqid, struct ctdb_call_state);

	if (state) {
		if (state->call->flags & CTDB_CALL_FLAG_VACUUM_MIGRATION) {
			/*
			 * We temporarily add the VACUUM_MIGRATED flag to
			 * the record flags, so that ctdb_ltdb_store can
			 * decide whether the record should be stored or
			 * deleted.
			 */
			header.flags |= CTDB_REC_FLAG_VACUUM_MIGRATED;
		}
	}

	if (ctdb_ltdb_store(ctdb_db, key, &header, data) != 0) {
		ctdb_fatal(ctdb, "ctdb_reply_dmaster store failed\n");

		ret = ctdb_ltdb_unlock(ctdb_db, key);
		if (ret != 0) {
			DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
		}
		return;
	}

	/* we just became DMASTER and this database is "sticky",
	   see if the record is flagged as "hot" and set up a pin-down
	   context to stop migrations for a little while if so
	*/
	if (ctdb_db_sticky(ctdb_db)) {
		ctdb_set_sticky_pindown(ctdb, ctdb_db, key);
	}

	if (state == NULL) {
		DEBUG(DEBUG_ERR,("pnn %u Invalid reqid %u in ctdb_become_dmaster from node %u\n",
			 ctdb->pnn, hdr->reqid, hdr->srcnode));

		ret = ctdb_ltdb_unlock(ctdb_db, key);
		if (ret != 0) {
			DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
		}
		return;
	}

	if (key.dsize != state->call->key.dsize || memcmp(key.dptr, state->call->key.dptr, key.dsize)) {
		DEBUG(DEBUG_ERR, ("Got bogus DMASTER packet reqid:%u from node %u. Key does not match key held in matching idr.\n", hdr->reqid, hdr->srcnode));

		ret = ctdb_ltdb_unlock(ctdb_db, key);
		if (ret != 0) {
			DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
		}
		return;
	}

	if (hdr->reqid != state->reqid) {
		/* we found a record but it was the wrong one */
		DEBUG(DEBUG_ERR, ("Dropped orphan in ctdb_become_dmaster with reqid:%u\n from node %u", hdr->reqid, hdr->srcnode));

		ret = ctdb_ltdb_unlock(ctdb_db, key);
		if (ret != 0) {
			DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
		}
		return;
	}

	(void) hash_count_increment(ctdb_db->migratedb, key);

	ctdb_call_local(ctdb_db, state->call, &header, state, &data, true);

	ret = ctdb_ltdb_unlock(ctdb_db, state->call->key);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
	}

	state->state = CTDB_CALL_DONE;
	if (state->async.fn) {
		state->async.fn(state);
	}
}
struct dmaster_defer_call {
	struct dmaster_defer_call *next, *prev;
	struct ctdb_context *ctdb;
	struct ctdb_req_header *hdr;
};

struct dmaster_defer_queue {
	struct ctdb_db_context *ctdb_db;
	uint32_t generation;
	struct dmaster_defer_call *deferred_calls;
};
static void dmaster_defer_reprocess(struct tevent_context *ev,
				    struct tevent_timer *te,
				    struct timeval t,
				    void *private_data)
{
	struct dmaster_defer_call *call = talloc_get_type(
		private_data, struct dmaster_defer_call);

	ctdb_input_pkt(call->ctdb, call->hdr);
	talloc_free(call);
}

static int dmaster_defer_queue_destructor(struct dmaster_defer_queue *ddq)
{
	/* Ignore requests, if database recovery happens in-between. */
	if (ddq->generation != ddq->ctdb_db->generation) {
		return 0;
	}

	while (ddq->deferred_calls != NULL) {
		struct dmaster_defer_call *call = ddq->deferred_calls;

		DLIST_REMOVE(ddq->deferred_calls, call);

		talloc_steal(call->ctdb, call);
		tevent_add_timer(call->ctdb->ev, call, timeval_zero(),
				 dmaster_defer_reprocess, call);
	}

	return 0;
}

static void *insert_ddq_callback(void *parm, void *data)
{
	if (data) {
		talloc_free(data);
	}
	return parm;
}
/**
 * This function is used to register a key in database that needs to be updated.
 * Any requests for that key should get deferred till this is completed.
 */
static int dmaster_defer_setup(struct ctdb_db_context *ctdb_db,
			       struct ctdb_req_header *hdr,
			       TDB_DATA key)
{
	uint32_t *k;
	struct dmaster_defer_queue *ddq;

	k = ctdb_key_to_idkey(hdr, key);
	if (k == NULL) {
		DEBUG(DEBUG_ERR, ("Failed to allocate key for dmaster defer setup\n"));
		return -1;
	}

	ddq = trbt_lookuparray32(ctdb_db->defer_dmaster, k[0], k);
	if (ddq != NULL) {
		if (ddq->generation == ctdb_db->generation) {
			talloc_free(k);
			return 0;
		}

		/* Recovery occurred - get rid of old queue. All the deferred
		 * requests will be resent anyway from ctdb_call_resend_db.
		 */
		talloc_free(ddq);
	}

	ddq = talloc(hdr, struct dmaster_defer_queue);
	if (ddq == NULL) {
		DEBUG(DEBUG_ERR, ("Failed to allocate dmaster defer queue\n"));
		talloc_free(k);
		return -1;
	}
	ddq->ctdb_db = ctdb_db;
	ddq->generation = hdr->generation;
	ddq->deferred_calls = NULL;

	trbt_insertarray32_callback(ctdb_db->defer_dmaster, k[0], k,
				    insert_ddq_callback, ddq);
	talloc_set_destructor(ddq, dmaster_defer_queue_destructor);

	talloc_free(k);
	return 0;
}
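
/*
 * Lifetime note for the queue set up above: the dmaster defer queue is
 * allocated on the request header "hdr", so it lives exactly as long as
 * the request that is updating the record.  When that request is freed,
 * dmaster_defer_queue_destructor() runs and requeues every deferred
 * packet via a zero-timeout tevent timer:
 *
 *	talloc_steal(call->ctdb, call);
 *	tevent_add_timer(call->ctdb->ev, call, timeval_zero(),
 *			 dmaster_defer_reprocess, call);
 *
 * so the deferred packets re-enter ctdb_input_pkt() on the next event
 * loop iteration instead of recursively from inside the destructor.
 */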
static int dmaster_defer_add(struct ctdb_db_context *ctdb_db,
			     struct ctdb_req_header *hdr,
			     TDB_DATA key)
{
	struct dmaster_defer_queue *ddq;
	struct dmaster_defer_call *call;
	uint32_t *k;

	k = ctdb_key_to_idkey(hdr, key);
	if (k == NULL) {
		DEBUG(DEBUG_ERR, ("Failed to allocate key for dmaster defer add\n"));
		return -1;
	}

	ddq = trbt_lookuparray32(ctdb_db->defer_dmaster, k[0], k);
	if (ddq == NULL) {
		talloc_free(k);
		return -1;
	}

	talloc_free(k);

	if (ddq->generation != hdr->generation) {
		talloc_set_destructor(ddq, NULL);
		talloc_free(ddq);
		return -1;
	}

	call = talloc(ddq, struct dmaster_defer_call);
	if (call == NULL) {
		DEBUG(DEBUG_ERR, ("Failed to allocate dmaster defer call\n"));
		return -1;
	}

	call->ctdb = ctdb_db->ctdb;
	call->hdr = talloc_steal(call, hdr);

	DLIST_ADD_END(ddq->deferred_calls, call);

	return 0;
}
/*
  called when a CTDB_REQ_DMASTER packet comes in

  this comes into the lmaster for a record when the current dmaster
  wants to give up the dmaster role and give it to someone else
*/
void ctdb_request_dmaster(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
{
	struct ctdb_req_dmaster_old *c = (struct ctdb_req_dmaster_old *)hdr;
	TDB_DATA key, data, data2;
	struct ctdb_ltdb_header header;
	struct ctdb_db_context *ctdb_db;
	uint32_t record_flags = 0;
	size_t len;
	int ret;

	key.dptr = c->data;
	key.dsize = c->keylen;
	data.dptr = c->data + c->keylen;
	data.dsize = c->datalen;
	len = offsetof(struct ctdb_req_dmaster_old, data) + key.dsize + data.dsize
			+ sizeof(uint32_t);
	if (len <= c->hdr.length) {
		memcpy(&record_flags, &c->data[c->keylen + c->datalen],
		       sizeof(record_flags));
	}

	ctdb_db = find_ctdb_db(ctdb, c->db_id);
	if (!ctdb_db) {
		ctdb_send_error(ctdb, hdr, -1,
				"Unknown database in request. db_id==0x%08x",
				c->db_id);
		return;
	}

	dmaster_defer_setup(ctdb_db, hdr, key);

	/* fetch the current record */
	ret = ctdb_ltdb_lock_fetch_requeue(ctdb_db, key, &header, hdr, &data2,
					   ctdb_call_input_pkt, ctdb, false);
	if (ret == -1) {
		ctdb_fatal(ctdb, "ctdb_req_dmaster failed to fetch record");
		return;
	}
	if (ret == -2) {
		DEBUG(DEBUG_INFO,(__location__ " deferring ctdb_request_dmaster\n"));
		return;
	}

	if (ctdb_lmaster(ctdb, &key) != ctdb->pnn) {
		DEBUG(DEBUG_ERR, ("dmaster request to non-lmaster "
				  "db=%s lmaster=%u gen=%u curgen=%u\n",
				  ctdb_db->db_name, ctdb_lmaster(ctdb, &key),
				  hdr->generation, ctdb_db->generation));
		ctdb_fatal(ctdb, "ctdb_req_dmaster to non-lmaster");
	}

	DEBUG(DEBUG_DEBUG,("pnn %u dmaster request on %08x for %u from %u\n",
		 ctdb->pnn, ctdb_hash(&key), c->dmaster, c->hdr.srcnode));

	/* its a protocol error if the sending node is not the current dmaster */
	if (header.dmaster != hdr->srcnode) {
		DEBUG(DEBUG_ALERT,("pnn %u dmaster request for new-dmaster %u from non-master %u real-dmaster=%u key %08x dbid 0x%08x gen=%u curgen=%u c->rsn=%llu header.rsn=%llu reqid=%u keyval=0x%08x\n",
			 ctdb->pnn, c->dmaster, hdr->srcnode, header.dmaster, ctdb_hash(&key),
			 ctdb_db->db_id, hdr->generation, ctdb->vnn_map->generation,
			 (unsigned long long)c->rsn, (unsigned long long)header.rsn, c->hdr.reqid,
			 (key.dsize >= 4)?(*(uint32_t *)key.dptr):0));
		if (header.rsn != 0 || header.dmaster != ctdb->pnn) {
			DEBUG(DEBUG_ERR,("ctdb_req_dmaster from non-master. Force a recovery.\n"));

			ctdb->recovery_mode = CTDB_RECOVERY_ACTIVE;
			ctdb_ltdb_unlock(ctdb_db, key);
			return;
		}
	}

	if (header.rsn > c->rsn) {
		DEBUG(DEBUG_ALERT,("pnn %u dmaster request with older RSN new-dmaster %u from %u real-dmaster=%u key %08x dbid 0x%08x gen=%u curgen=%u c->rsn=%llu header.rsn=%llu reqid=%u\n",
			 ctdb->pnn, c->dmaster, hdr->srcnode, header.dmaster, ctdb_hash(&key),
			 ctdb_db->db_id, hdr->generation, ctdb->vnn_map->generation,
			 (unsigned long long)c->rsn, (unsigned long long)header.rsn, c->hdr.reqid));
	}

	/* use the rsn from the sending node */
	header.rsn = c->rsn;

	/* store the record flags from the sending node */
	header.flags = record_flags;

	/* check if the new dmaster is the lmaster, in which case we
	   skip the dmaster reply */
	if (c->dmaster == ctdb->pnn) {
		ctdb_become_dmaster(ctdb_db, hdr, key, data, c->rsn, record_flags);
	} else {
		ctdb_send_dmaster_reply(ctdb_db, &header, key, data, c->dmaster, hdr->reqid);

		ret = ctdb_ltdb_unlock(ctdb_db, key);
		if (ret != 0) {
			DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
		}
	}
}
static void ctdb_sticky_record_timeout(struct tevent_context *ev,
				       struct tevent_timer *te,
				       struct timeval t, void *private_data)
{
	struct ctdb_sticky_record *sr = talloc_get_type(private_data,
						       struct ctdb_sticky_record);
	talloc_free(sr);
}

static void *ctdb_make_sticky_record_callback(void *parm, void *data)
{
	if (data) {
		DEBUG(DEBUG_ERR,("Already have sticky record registered. Free old %p and create new %p\n", data, parm));
		talloc_free(data);
	}
	return parm;
}
static int
ctdb_make_record_sticky(struct ctdb_context *ctdb, struct ctdb_db_context *ctdb_db, TDB_DATA key)
{
	TALLOC_CTX *tmp_ctx = talloc_new(NULL);
	uint32_t *k;
	struct ctdb_sticky_record *sr;

	k = ctdb_key_to_idkey(tmp_ctx, key);
	if (k == NULL) {
		DEBUG(DEBUG_ERR,("Failed to allocate key for sticky record\n"));
		talloc_free(tmp_ctx);
		return -1;
	}

	sr = trbt_lookuparray32(ctdb_db->sticky_records, k[0], &k[0]);
	if (sr != NULL) {
		talloc_free(tmp_ctx);
		return 0;
	}

	sr = talloc(ctdb_db->sticky_records, struct ctdb_sticky_record);
	if (sr == NULL) {
		talloc_free(tmp_ctx);
		DEBUG(DEBUG_ERR,("Failed to allocate sticky record structure\n"));
		return -1;
	}

	sr->ctdb    = ctdb;
	sr->ctdb_db = ctdb_db;
	sr->pindown = NULL;

	DEBUG(DEBUG_ERR,("Make record sticky for %d seconds in db %s key:0x%08x.\n",
			 ctdb->tunable.sticky_duration,
			 ctdb_db->db_name, ctdb_hash(&key)));

	trbt_insertarray32_callback(ctdb_db->sticky_records, k[0], &k[0], ctdb_make_sticky_record_callback, sr);

	tevent_add_timer(ctdb->ev, sr,
			 timeval_current_ofs(ctdb->tunable.sticky_duration, 0),
			 ctdb_sticky_record_timeout, sr);

	talloc_free(tmp_ctx);
	return 0;
}
struct pinned_down_requeue_handle {
	struct ctdb_context *ctdb;
	struct ctdb_req_header *hdr;
};

struct pinned_down_deferred_call {
	struct ctdb_context *ctdb;
	struct ctdb_req_header *hdr;
};
static void pinned_down_requeue(struct tevent_context *ev,
				struct tevent_timer *te,
				struct timeval t, void *private_data)
{
	struct pinned_down_requeue_handle *handle = talloc_get_type(private_data, struct pinned_down_requeue_handle);
	struct ctdb_context *ctdb = handle->ctdb;

	talloc_steal(ctdb, handle->hdr);
	ctdb_call_input_pkt(ctdb, handle->hdr);

	talloc_free(handle);
}

static int pinned_down_destructor(struct pinned_down_deferred_call *pinned_down)
{
	struct ctdb_context *ctdb = pinned_down->ctdb;
	struct pinned_down_requeue_handle *handle = talloc(ctdb, struct pinned_down_requeue_handle);

	handle->ctdb = pinned_down->ctdb;
	handle->hdr  = pinned_down->hdr;
	talloc_steal(handle, handle->hdr);

	tevent_add_timer(ctdb->ev, handle, timeval_zero(),
			 pinned_down_requeue, handle);

	return 0;
}
static int
ctdb_defer_pinned_down_request(struct ctdb_context *ctdb, struct ctdb_db_context *ctdb_db, TDB_DATA key, struct ctdb_req_header *hdr)
{
	TALLOC_CTX *tmp_ctx = talloc_new(NULL);
	uint32_t *k;
	struct ctdb_sticky_record *sr;
	struct pinned_down_deferred_call *pinned_down;

	k = ctdb_key_to_idkey(tmp_ctx, key);
	if (k == NULL) {
		DEBUG(DEBUG_ERR,("Failed to allocate key for sticky record\n"));
		talloc_free(tmp_ctx);
		return -1;
	}

	sr = trbt_lookuparray32(ctdb_db->sticky_records, k[0], &k[0]);
	if (sr == NULL) {
		talloc_free(tmp_ctx);
		return -1;
	}

	talloc_free(tmp_ctx);

	if (sr->pindown == NULL) {
		return -1;
	}

	pinned_down = talloc(sr->pindown, struct pinned_down_deferred_call);
	if (pinned_down == NULL) {
		DEBUG(DEBUG_ERR,("Failed to allocate structure for deferred pinned down request\n"));
		return -1;
	}

	pinned_down->ctdb = ctdb;
	pinned_down->hdr  = hdr;

	talloc_set_destructor(pinned_down, pinned_down_destructor);
	talloc_steal(pinned_down, hdr);

	return 0;
}
static int hot_key_cmp(const void *a, const void *b)
{
	const struct ctdb_db_hot_key *ka = (const struct ctdb_db_hot_key *)a;
	const struct ctdb_db_hot_key *kb = (const struct ctdb_db_hot_key *)b;

	if (ka->count < kb->count) {
		return -1;
	}
	if (ka->count > kb->count) {
		return 1;
	}

	return 0;
}
static void
ctdb_update_db_stat_hot_keys(struct ctdb_db_context *ctdb_db, TDB_DATA key,
			     unsigned int count)
{
	unsigned int i, id;
	char *keystr;

	/*
	 * If all slots are being used then only need to compare
	 * against the count in the 0th slot, since it contains the
	 * smallest count.
	 */
	if (ctdb_db->statistics.num_hot_keys == MAX_HOT_KEYS &&
	    count <= ctdb_db->hot_keys[0].count) {
		return;
	}

	/* see if we already know this key */
	for (i = 0; i < MAX_HOT_KEYS; i++) {
		if (key.dsize != ctdb_db->hot_keys[i].key.dsize) {
			continue;
		}
		if (memcmp(key.dptr, ctdb_db->hot_keys[i].key.dptr, key.dsize)) {
			continue;
		}
		/* found an entry for this key */
		if (count <= ctdb_db->hot_keys[i].count) {
			return;
		}
		if (count >= (2 * ctdb_db->hot_keys[i].last_logged_count)) {
			keystr = hex_encode_talloc(ctdb_db,
						   (unsigned char *)key.dptr,
						   key.dsize);
			D_NOTICE("Updated hot key database=%s key=%s count=%d\n",
				 ctdb_db->db_name,
				 keystr ? keystr : "" ,
				 count);
			TALLOC_FREE(keystr);
			ctdb_db->hot_keys[i].last_logged_count = count;
		}
		ctdb_db->hot_keys[i].count = count;
		return;
	}

	if (ctdb_db->statistics.num_hot_keys < MAX_HOT_KEYS) {
		id = ctdb_db->statistics.num_hot_keys;
		ctdb_db->statistics.num_hot_keys++;
	} else {
		id = 0;
	}

	if (ctdb_db->hot_keys[id].key.dptr != NULL) {
		talloc_free(ctdb_db->hot_keys[id].key.dptr);
	}
	ctdb_db->hot_keys[id].key.dsize = key.dsize;
	ctdb_db->hot_keys[id].key.dptr = talloc_memdup(ctdb_db,
						       key.dptr,
						       key.dsize);
	ctdb_db->hot_keys[id].count = count;

	keystr = hex_encode_talloc(ctdb_db,
				   (unsigned char *)key.dptr, key.dsize);
	D_NOTICE("Added hot key database=%s key=%s count=%d\n",
		 ctdb_db->db_name,
		 keystr ? keystr : "" ,
		 count);
	TALLOC_FREE(keystr);
	ctdb_db->hot_keys[id].last_logged_count = count;

	qsort(&ctdb_db->hot_keys[0],
	      ctdb_db->statistics.num_hot_keys,
	      sizeof(struct ctdb_db_hot_key),
	      hot_key_cmp);
}
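
/*
 * Bookkeeping note for the table maintained above: hot_key_cmp() sorts
 * the hot keys in ascending order of count, so after the qsort the 0th
 * slot always holds the coldest tracked key.  That is what makes the
 * early-exit test
 *
 *	if (ctdb_db->statistics.num_hot_keys == MAX_HOT_KEYS &&
 *	    count <= ctdb_db->hot_keys[0].count) {
 *		return;
 *	}
 *
 * sufficient: a count that cannot beat the coldest entry can never
 * belong in a full table, and when an entry must be replaced it is the
 * 0th slot that gets evicted.
 */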
/*
  called when a CTDB_REQ_CALL packet comes in
*/
void ctdb_request_call(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
{
	struct ctdb_req_call_old *c = (struct ctdb_req_call_old *)hdr;
	TDB_DATA data;
	struct ctdb_reply_call_old *r;
	int ret, len;
	struct ctdb_ltdb_header header;
	struct ctdb_call *call;
	struct ctdb_db_context *ctdb_db;
	int tmp_count, bucket;

	if (ctdb->methods == NULL) {
		DEBUG(DEBUG_INFO,(__location__ " Failed ctdb_request_call. Transport is DOWN\n"));
		return;
	}

	ctdb_db = find_ctdb_db(ctdb, c->db_id);
	if (!ctdb_db) {
		ctdb_send_error(ctdb, hdr, -1,
				"Unknown database in request. db_id==0x%08x",
				c->db_id);
		return;
	}

	call = talloc(hdr, struct ctdb_call);
	CTDB_NO_MEMORY_FATAL(ctdb, call);

	call->call_id  = c->callid;
	call->key.dptr = c->data;
	call->key.dsize = c->keylen;
	call->call_data.dptr = c->data + c->keylen;
	call->call_data.dsize = c->calldatalen;
	call->reply_data.dptr  = NULL;
	call->reply_data.dsize = 0;

	/* If this record is pinned down we should defer the
	   request until the pindown times out
	*/
	if (ctdb_db_sticky(ctdb_db)) {
		if (ctdb_defer_pinned_down_request(ctdb, ctdb_db, call->key, hdr) == 0) {
			DEBUG(DEBUG_INFO,
			      ("Defer request for pinned down record in %s\n", ctdb_db->db_name));
			talloc_free(call);
			return;
		}
	}

	if (dmaster_defer_add(ctdb_db, hdr, call->key) == 0) {
		talloc_free(call);
		return;
	}

	/* determine if we are the dmaster for this key. This also
	   fetches the record data (if any), thus avoiding a 2nd fetch of the data
	   if the call will be answered locally */

	ret = ctdb_ltdb_lock_fetch_requeue(ctdb_db, call->key, &header, hdr, &data,
					   ctdb_call_input_pkt, ctdb, false);
	if (ret == -1) {
		ctdb_send_error(ctdb, hdr, ret, "ltdb fetch failed in ctdb_request_call");
		talloc_free(call);
		return;
	}
	if (ret == -2) {
		DEBUG(DEBUG_INFO,(__location__ " deferred ctdb_request_call\n"));
		talloc_free(call);
		return;
	}

	/* Dont do READONLY if we don't have a tracking database */
	if ((c->flags & CTDB_WANT_READONLY) && !ctdb_db_readonly(ctdb_db)) {
		c->flags &= ~CTDB_WANT_READONLY;
	}

	if (header.flags & CTDB_REC_RO_REVOKE_COMPLETE) {
		header.flags &= ~CTDB_REC_RO_FLAGS;
		CTDB_INCREMENT_STAT(ctdb, total_ro_revokes);
		CTDB_INCREMENT_DB_STAT(ctdb_db, db_ro_revokes);
		if (ctdb_ltdb_store(ctdb_db, call->key, &header, data) != 0) {
			ctdb_fatal(ctdb, "Failed to write header with cleared REVOKE flag");
		}
		/* and clear out the tracking data */
		if (tdb_delete(ctdb_db->rottdb, call->key) != 0) {
			DEBUG(DEBUG_ERR,(__location__ " Failed to clear out trackingdb record\n"));
		}
	}

	/* if we are revoking, we must defer all other calls until the revoke
	 * process is completed.
	 */
	if (header.flags & CTDB_REC_RO_REVOKING_READONLY) {
		talloc_free(data.dptr);
		ret = ctdb_ltdb_unlock(ctdb_db, call->key);

		if (ctdb_add_revoke_deferred_call(ctdb, ctdb_db, call->key, hdr, ctdb_call_input_pkt, ctdb) != 0) {
			ctdb_fatal(ctdb, "Failed to add deferred call for revoke child");
		}
		talloc_free(call);
		return;
	}

	/*
	 * If we are not the dmaster and are not hosting any delegations,
	 * then we redirect the request to the node than can answer it
	 * (the lmaster or the dmaster).
	 */
	if ((header.dmaster != ctdb->pnn)
	    && (!(header.flags & CTDB_REC_RO_HAVE_DELEGATIONS)) ) {
		talloc_free(data.dptr);
		ctdb_call_send_redirect(ctdb, ctdb_db, call->key, c, &header);

		ret = ctdb_ltdb_unlock(ctdb_db, call->key);
		if (ret != 0) {
			DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
		}
		talloc_free(call);
		return;
	}

	if ( (!(c->flags & CTDB_WANT_READONLY))
	&& (header.flags & (CTDB_REC_RO_HAVE_DELEGATIONS|CTDB_REC_RO_HAVE_READONLY)) ) {
		header.flags   |= CTDB_REC_RO_REVOKING_READONLY;
		if (ctdb_ltdb_store(ctdb_db, call->key, &header, data) != 0) {
			ctdb_fatal(ctdb, "Failed to store record with HAVE_DELEGATIONS set");
		}
		ret = ctdb_ltdb_unlock(ctdb_db, call->key);

		if (ctdb_start_revoke_ro_record(ctdb, ctdb_db, call->key, &header, data) != 0) {
			ctdb_fatal(ctdb, "Failed to start record revoke");
		}
		talloc_free(data.dptr);

		if (ctdb_add_revoke_deferred_call(ctdb, ctdb_db, call->key, hdr, ctdb_call_input_pkt, ctdb) != 0) {
			ctdb_fatal(ctdb, "Failed to add deferred call for revoke child");
		}
		talloc_free(call);

		return;
	}

	/* If this is the first request for delegation. bump rsn and set
	 * the delegations flag
	 */
	if ((c->flags & CTDB_WANT_READONLY)
	&&  (c->callid == CTDB_FETCH_WITH_HEADER_FUNC)
	&&  (!(header.flags & CTDB_REC_RO_HAVE_DELEGATIONS))) {
		header.rsn     += 3;
		header.flags   |= CTDB_REC_RO_HAVE_DELEGATIONS;
		if (ctdb_ltdb_store(ctdb_db, call->key, &header, data) != 0) {
			ctdb_fatal(ctdb, "Failed to store record with HAVE_DELEGATIONS set");
		}
	}
	if ((c->flags & CTDB_WANT_READONLY)
	&&  ((unsigned int)call->call_id == CTDB_FETCH_WITH_HEADER_FUNC)) {
		TDB_DATA tdata;

		tdata = tdb_fetch(ctdb_db->rottdb, call->key);
		if (ctdb_trackingdb_add_pnn(ctdb, &tdata, c->hdr.srcnode) != 0) {
			ctdb_fatal(ctdb, "Failed to add node to trackingdb");
		}
		if (tdb_store(ctdb_db->rottdb, call->key, tdata, TDB_REPLACE) != 0) {
			ctdb_fatal(ctdb, "Failed to store trackingdb data");
		}
		free(tdata.dptr);

		ret = ctdb_ltdb_unlock(ctdb_db, call->key);
		if (ret != 0) {
			DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
		}

		len = offsetof(struct ctdb_reply_call_old, data) + data.dsize + sizeof(struct ctdb_ltdb_header);
		r = ctdb_transport_allocate(ctdb, ctdb, CTDB_REPLY_CALL, len,
					    struct ctdb_reply_call_old);
		CTDB_NO_MEMORY_FATAL(ctdb, r);
		r->hdr.destnode   = c->hdr.srcnode;
		r->hdr.reqid      = c->hdr.reqid;
		r->hdr.generation = ctdb_db->generation;
		r->status         = 0;
		r->datalen        = data.dsize + sizeof(struct ctdb_ltdb_header);
		header.rsn     -= 2;
		header.flags   |= CTDB_REC_RO_HAVE_READONLY;
		header.flags   &= ~CTDB_REC_RO_HAVE_DELEGATIONS;
		memcpy(&r->data[0], &header, sizeof(struct ctdb_ltdb_header));

		if (data.dsize) {
			memcpy(&r->data[sizeof(struct ctdb_ltdb_header)], data.dptr, data.dsize);
		}

		ctdb_queue_packet(ctdb, &r->hdr);
		CTDB_INCREMENT_STAT(ctdb, total_ro_delegations);
		CTDB_INCREMENT_DB_STAT(ctdb_db, db_ro_delegations);

		talloc_free(r);
		talloc_free(call);
		return;
	}

	CTDB_UPDATE_STAT(ctdb, max_hop_count, c->hopcount);
	tmp_count = c->hopcount;
	bucket = 0;
	while (tmp_count) {
		tmp_count >>= 2;
		bucket++;
	}
	if (bucket >= MAX_COUNT_BUCKETS) {
		bucket = MAX_COUNT_BUCKETS - 1;
	}
	CTDB_INCREMENT_STAT(ctdb, hop_count_bucket[bucket]);
	CTDB_INCREMENT_DB_STAT(ctdb_db, hop_count_bucket[bucket]);

	/* If this database supports sticky records, then check if the
	   hopcount is big. If it is it means the record is hot and we
	   should make it sticky.
	*/
	if (ctdb_db_sticky(ctdb_db) &&
	    c->hopcount >= ctdb->tunable.hopcount_make_sticky) {
		ctdb_make_record_sticky(ctdb, ctdb_db, call->key);
	}

	/* Try if possible to migrate the record off to the caller node.
	 * From the clients perspective a fetch of the data is just as
	 * expensive as a migration.
	 */
	if (c->hdr.srcnode != ctdb->pnn) {
		if (ctdb_db->persistent_state) {
			DEBUG(DEBUG_INFO, (__location__ " refusing migration"
			      " of key %s while transaction is active\n",
			      (char *)call->key.dptr));
		} else {
			DEBUG(DEBUG_DEBUG,("pnn %u starting migration of %08x to %u\n",
				 ctdb->pnn, ctdb_hash(&(call->key)), c->hdr.srcnode));
			ctdb_call_send_dmaster(ctdb_db, c, &header, &(call->key), &data);
			talloc_free(data.dptr);

			ret = ctdb_ltdb_unlock(ctdb_db, call->key);
			if (ret != 0) {
				DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
			}
			talloc_free(call);
			return;
		}
	}

	ret = ctdb_call_local(ctdb_db, call, &header, hdr, &data, true);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " ctdb_call_local failed\n"));
		call->status = -1;
	}

	ret = ctdb_ltdb_unlock(ctdb_db, call->key);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " ctdb_ltdb_unlock() failed with error %d\n", ret));
	}

	len = offsetof(struct ctdb_reply_call_old, data) + call->reply_data.dsize;
	r = ctdb_transport_allocate(ctdb, ctdb, CTDB_REPLY_CALL, len,
				    struct ctdb_reply_call_old);
	CTDB_NO_MEMORY_FATAL(ctdb, r);
	r->hdr.destnode   = hdr->srcnode;
	r->hdr.reqid      = hdr->reqid;
	r->hdr.generation = ctdb_db->generation;
	r->status         = call->status;
	r->datalen        = call->reply_data.dsize;
	if (call->reply_data.dsize) {
		memcpy(&r->data[0], call->reply_data.dptr, call->reply_data.dsize);
	}

	ctdb_queue_packet(ctdb, &r->hdr);

	talloc_free(r);
	talloc_free(call);
}
/**
 * called when a CTDB_REPLY_CALL packet comes in
 *
 * This packet comes in response to a CTDB_REQ_CALL request packet. It
 * contains any reply data from the call
 */
void ctdb_reply_call(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
{
	struct ctdb_reply_call_old *c = (struct ctdb_reply_call_old *)hdr;
	struct ctdb_call_state *state;

	state = reqid_find(ctdb->idr, hdr->reqid, struct ctdb_call_state);
	if (state == NULL) {
		DEBUG(DEBUG_ERR, (__location__ " reqid %u not found\n", hdr->reqid));
		return;
	}

	if (hdr->reqid != state->reqid) {
		/* we found a record but it was the wrong one */
		DEBUG(DEBUG_ERR, ("Dropped orphaned call reply with reqid:%u\n",hdr->reqid));
		return;
	}

	/* read only delegation processing */
	/* If we got a FETCH_WITH_HEADER we should check if this is a ro
	 * delegation since we may need to update the record header
	 */
	if (state->c->callid == CTDB_FETCH_WITH_HEADER_FUNC) {
		struct ctdb_db_context *ctdb_db = state->ctdb_db;
		struct ctdb_ltdb_header *header = (struct ctdb_ltdb_header *)&c->data[0];
		struct ctdb_ltdb_header oldheader;
		TDB_DATA key, data, olddata;
		int ret;

		if (!(header->flags & CTDB_REC_RO_HAVE_READONLY)) {
			goto finished_ro;
		}

		key.dsize = state->c->keylen;
		key.dptr  = state->c->data;
		ret = ctdb_ltdb_lock_requeue(ctdb_db, key, hdr,
				     ctdb_call_input_pkt, ctdb, false);
		if (ret == -2) {
			return;
		}
		if (ret != 0) {
			DEBUG(DEBUG_ERR,(__location__ " Failed to get lock in ctdb_reply_call\n"));
			return;
		}

		ret = ctdb_ltdb_fetch(ctdb_db, key, &oldheader, state, &olddata);
		if (ret != 0) {
			DEBUG(DEBUG_ERR, ("Failed to fetch old record in ctdb_reply_call\n"));
			ctdb_ltdb_unlock(ctdb_db, key);
			goto finished_ro;
		}

		if (header->rsn <= oldheader.rsn) {
			ctdb_ltdb_unlock(ctdb_db, key);
			goto finished_ro;
		}

		if (c->datalen < sizeof(struct ctdb_ltdb_header)) {
			DEBUG(DEBUG_ERR,(__location__ " Got FETCH_WITH_HEADER reply with too little data: %d bytes\n", c->datalen));
			ctdb_ltdb_unlock(ctdb_db, key);
			goto finished_ro;
		}

		data.dsize = c->datalen - sizeof(struct ctdb_ltdb_header);
		data.dptr  = &c->data[sizeof(struct ctdb_ltdb_header)];
		ret = ctdb_ltdb_store(ctdb_db, key, header, data);
		if (ret != 0) {
			DEBUG(DEBUG_ERR, ("Failed to store new record in ctdb_reply_call\n"));
			ctdb_ltdb_unlock(ctdb_db, key);
			goto finished_ro;
		}

		ctdb_ltdb_unlock(ctdb_db, key);
	}
finished_ro:

	state->call->reply_data.dptr = c->data;
	state->call->reply_data.dsize = c->datalen;
	state->call->status = c->status;

	talloc_steal(state, c);

	state->state = CTDB_CALL_DONE;
	if (state->async.fn) {
		state->async.fn(state);
	}
}
/**
 * called when a CTDB_REPLY_DMASTER packet comes in
 *
 * This packet comes in from the lmaster in response to a CTDB_REQ_CALL
 * request packet. It means that the current dmaster wants to give us
 * the dmaster role.
 */
void ctdb_reply_dmaster(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
{
	struct ctdb_reply_dmaster_old *c = (struct ctdb_reply_dmaster_old *)hdr;
	struct ctdb_db_context *ctdb_db;
	TDB_DATA key, data;
	uint32_t record_flags = 0;
	size_t len;
	int ret;

	ctdb_db = find_ctdb_db(ctdb, c->db_id);
	if (ctdb_db == NULL) {
		DEBUG(DEBUG_ERR,("Unknown db_id 0x%x in ctdb_reply_dmaster\n", c->db_id));
		return;
	}

	key.dptr = c->data;
	key.dsize = c->keylen;
	data.dptr = &c->data[key.dsize];
	data.dsize = c->datalen;
	len = offsetof(struct ctdb_reply_dmaster_old, data) + key.dsize + data.dsize
			+ sizeof(uint32_t);
	if (len <= c->hdr.length) {
		memcpy(&record_flags, &c->data[c->keylen + c->datalen],
		       sizeof(record_flags));
	}

	dmaster_defer_setup(ctdb_db, hdr, key);

	ret = ctdb_ltdb_lock_requeue(ctdb_db, key, hdr,
				     ctdb_call_input_pkt, ctdb, false);
	if (ret == -2) {
		return;
	}
	if (ret != 0) {
		DEBUG(DEBUG_ERR,(__location__ " Failed to get lock in ctdb_reply_dmaster\n"));
		return;
	}

	ctdb_become_dmaster(ctdb_db, hdr, key, data, c->rsn, record_flags);
}
/*
  called when a CTDB_REPLY_ERROR packet comes in
*/
void ctdb_reply_error(struct ctdb_context *ctdb, struct ctdb_req_header *hdr)
{
	struct ctdb_reply_error_old *c = (struct ctdb_reply_error_old *)hdr;
	struct ctdb_call_state *state;

	state = reqid_find(ctdb->idr, hdr->reqid, struct ctdb_call_state);
	if (state == NULL) {
		DEBUG(DEBUG_ERR,("pnn %u Invalid reqid %u in ctdb_reply_error\n",
			 ctdb->pnn, hdr->reqid));
		return;
	}

	if (hdr->reqid != state->reqid) {
		/* we found a record but it was the wrong one */
		DEBUG(DEBUG_ERR, ("Dropped orphaned error reply with reqid:%u\n",hdr->reqid));
		return;
	}

	talloc_steal(state, c);

	state->state  = CTDB_CALL_ERROR;
	state->errmsg = (char *)c->msg;
	if (state->async.fn) {
		state->async.fn(state);
	}
}
*state
)
1374 DLIST_REMOVE(state
->ctdb_db
->pending_calls
, state
);
1375 reqid_remove(state
->ctdb_db
->ctdb
->idr
, state
->reqid
);
/*
  called when a ctdb_call needs to be resent after a reconfigure event
*/
static void ctdb_call_resend(struct ctdb_call_state *state)
{
	struct ctdb_context *ctdb = state->ctdb_db->ctdb;

	state->generation = state->ctdb_db->generation;

	/* use a new reqid, in case the old reply does eventually come in */
	reqid_remove(ctdb->idr, state->reqid);
	state->reqid = reqid_new(ctdb->idr, state);
	state->c->hdr.reqid = state->reqid;

	/* update the generation count for this request, so its valid with the new vnn_map */
	state->c->hdr.generation = state->generation;

	/* send the packet to ourselves, it will be redirected appropriately */
	state->c->hdr.destnode = ctdb->pnn;

	ctdb_queue_packet(ctdb, &state->c->hdr);
	D_INFO("resent ctdb_call for db %s reqid %u generation %u\n",
	       state->ctdb_db->db_name,
	       state->reqid,
	       state->generation);
}
/*
  resend all pending calls on recovery
 */
void ctdb_call_resend_db(struct ctdb_db_context *ctdb_db)
{
	struct ctdb_call_state *state, *next;
	unsigned int count = 0;

	for (state = ctdb_db->pending_calls; state; state = next) {
		next = state->next;
		ctdb_call_resend(state);
		count++;
	}

	/* Avoid logging a 0 count below */
	if (count == 0) {
		return;
	}

	D_NOTICE("Resent calls for database=%s, generation=%u, count=%u\n",
		 ctdb_db->db_name,
		 ctdb_db->generation,
		 count);
}
void ctdb_call_resend_all(struct ctdb_context *ctdb)
{
	struct ctdb_db_context *ctdb_db;

	for (ctdb_db = ctdb->db_list; ctdb_db; ctdb_db = ctdb_db->next) {
		ctdb_call_resend_db(ctdb_db);
	}
}
/*
  this allows the caller to setup a async.fn
*/
static void call_local_trigger(struct tevent_context *ev,
			       struct tevent_timer *te,
			       struct timeval t, void *private_data)
{
	struct ctdb_call_state *state = talloc_get_type(private_data, struct ctdb_call_state);
	if (state->async.fn) {
		state->async.fn(state);
	}
}
/*
  construct an event driven local ctdb_call

  this is used so that locally processed ctdb_call requests are processed
  in an event driven manner
*/
struct ctdb_call_state *ctdb_call_local_send(struct ctdb_db_context *ctdb_db,
					     struct ctdb_call *call,
					     struct ctdb_ltdb_header *header,
					     TDB_DATA *data)
{
	struct ctdb_call_state *state;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	int ret;

	state = talloc_zero(ctdb_db, struct ctdb_call_state);
	CTDB_NO_MEMORY_NULL(ctdb, state);

	talloc_steal(state, data->dptr);

	state->state   = CTDB_CALL_DONE;
	state->call    = talloc(state, struct ctdb_call);
	CTDB_NO_MEMORY_NULL(ctdb, state->call);
	*(state->call) = *call;
	state->ctdb_db = ctdb_db;

	ret = ctdb_call_local(ctdb_db, state->call, header, state, data, true);
	if (ret != 0) {
		DEBUG(DEBUG_DEBUG,("ctdb_call_local() failed, ignoring return code %d\n", ret));
	}

	tevent_add_timer(ctdb->ev, state, timeval_zero(),
			 call_local_trigger, state);

	return state;
}
/*
  make a remote ctdb call - async send. Called in daemon context.

  This constructs a ctdb_call request and queues it for processing.
  This call never blocks.
*/
struct ctdb_call_state *ctdb_daemon_call_send_remote(struct ctdb_db_context *ctdb_db,
						     struct ctdb_call *call,
						     struct ctdb_ltdb_header *header)
{
	uint32_t len;
	struct ctdb_call_state *state;
	struct ctdb_context *ctdb = ctdb_db->ctdb;
	struct ctdb_req_call_old *c;

	if (ctdb->methods == NULL) {
		DEBUG(DEBUG_INFO,(__location__ " Failed send packet. Transport is down\n"));
		return NULL;
	}

	state = talloc_zero(ctdb_db, struct ctdb_call_state);
	CTDB_NO_MEMORY_NULL(ctdb, state);
	state->call = talloc(state, struct ctdb_call);
	CTDB_NO_MEMORY_NULL(ctdb, state->call);

	state->reqid = reqid_new(ctdb->idr, state);
	state->ctdb_db = ctdb_db;
	state->state  = CTDB_CALL_WAIT;
	state->generation = ctdb_db->generation;

	len = offsetof(struct ctdb_req_call_old, data) + call->key.dsize +
		call->call_data.dsize;

	c = ctdb_transport_allocate(ctdb,
				    state,
				    CTDB_REQ_CALL,
				    len,
				    struct ctdb_req_call_old);

	CTDB_NO_MEMORY_NULL(ctdb, c);
	state->c = c;

	c->hdr.destnode   = header->dmaster;
	c->hdr.reqid      = state->reqid;
	c->hdr.generation = ctdb_db->generation;
	c->flags          = call->flags;
	c->db_id          = ctdb_db->db_id;
	c->callid         = call->call_id;
	c->hopcount       = 0;
	c->keylen         = call->key.dsize;
	c->calldatalen    = call->call_data.dsize;

	memcpy(&c->data[0], call->key.dptr, call->key.dsize);
	memcpy(&c->data[call->key.dsize],
	       call->call_data.dptr,
	       call->call_data.dsize);
	*(state->call) = *call;
	state->call->call_data.dptr = &c->data[call->key.dsize];
	state->call->key.dptr       = &c->data[0];

	DLIST_ADD(ctdb_db->pending_calls, state);

	talloc_set_destructor(state, ctdb_call_destructor);
	ctdb_queue_packet(ctdb, &state->c->hdr);

	return state;
}
/*
  make a remote ctdb call - async recv - called in daemon context

  This is called when the program wants to wait for a ctdb_call to complete and get the
  results. This call will block unless the call has already completed.
*/
int ctdb_daemon_call_recv(struct ctdb_call_state *state, struct ctdb_call *call)
{
	while (state->state < CTDB_CALL_DONE) {
		tevent_loop_once(state->ctdb_db->ctdb->ev);
	}
	if (state->state != CTDB_CALL_DONE) {
		ctdb_set_error(state->ctdb_db->ctdb, "%s", state->errmsg);
		talloc_free(state);
		return -1;
	}

	if (state->call->reply_data.dsize) {
		call->reply_data.dptr = talloc_memdup(call,
						      state->call->reply_data.dptr,
						      state->call->reply_data.dsize);
		call->reply_data.dsize = state->call->reply_data.dsize;
	} else {
		call->reply_data.dptr = NULL;
		call->reply_data.dsize = 0;
	}
	call->status = state->call->status;
	talloc_free(state);
	return 0;
}
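
/*
 * Typical daemon-side usage of the pair above (sketch only; error
 * handling trimmed, and "call"/"header" are assumed to have been filled
 * in by the caller):
 *
 *	struct ctdb_call_state *state;
 *
 *	state = ctdb_daemon_call_send_remote(ctdb_db, &call, &header);
 *	if (state == NULL) {
 *		return -1;
 *	}
 *	ret = ctdb_daemon_call_recv(state, &call);
 *
 * The send never blocks; the recv runs the tevent loop until a reply
 * (or an error reply) has moved the state to CTDB_CALL_DONE or
 * CTDB_CALL_ERROR.
 */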
struct revokechild_deferred_call {
	struct revokechild_deferred_call *prev, *next;
	struct ctdb_context *ctdb;
	struct ctdb_req_header *hdr;
	deferred_requeue_fn fn;
	void *ctx;
	struct revokechild_handle *rev_hdl;
};

struct revokechild_handle {
	struct revokechild_handle *next, *prev;
	struct ctdb_context *ctdb;
	struct ctdb_db_context *ctdb_db;
	struct tevent_fd *fde;
	int fd[2];
	pid_t child;
	int status;
	TDB_DATA key;
	struct revokechild_deferred_call *deferred_call_list;
};
static void deferred_call_requeue(struct tevent_context *ev,
				  struct tevent_timer *te,
				  struct timeval t, void *private_data)
{
	struct revokechild_deferred_call *dlist = talloc_get_type_abort(
		private_data, struct revokechild_deferred_call);

	while (dlist != NULL) {
		struct revokechild_deferred_call *dcall = dlist;

		talloc_set_destructor(dcall, NULL);
		DLIST_REMOVE(dlist, dcall);
		dcall->fn(dcall->ctx, dcall->hdr);
		talloc_free(dcall);
	}
}

static int deferred_call_destructor(struct revokechild_deferred_call *dcall)
{
	struct revokechild_handle *rev_hdl = dcall->rev_hdl;

	DLIST_REMOVE(rev_hdl->deferred_call_list, dcall);
	return 0;
}
static int revokechild_destructor(struct revokechild_handle *rev_hdl)
{
	struct revokechild_deferred_call *now_list = NULL;
	struct revokechild_deferred_call *delay_list = NULL;

	if (rev_hdl->fde != NULL) {
		talloc_free(rev_hdl->fde);
	}

	if (rev_hdl->fd[0] != -1) {
		close(rev_hdl->fd[0]);
	}
	if (rev_hdl->fd[1] != -1) {
		close(rev_hdl->fd[1]);
	}
	ctdb_kill(rev_hdl->ctdb, rev_hdl->child, SIGKILL);

	DLIST_REMOVE(rev_hdl->ctdb_db->revokechild_active, rev_hdl);

	while (rev_hdl->deferred_call_list != NULL) {
		struct revokechild_deferred_call *dcall;

		dcall = rev_hdl->deferred_call_list;
		DLIST_REMOVE(rev_hdl->deferred_call_list, dcall);

		/* If revoke is successful, then first process all the calls
		 * that need write access, and delay readonly requests by 1
		 * second.
		 *
		 * If revoke is unsuccessful, most likely because of node
		 * failure, delay all the pending requests, so database can
		 * be recovered.
		 */

		if (rev_hdl->status == 0) {
			struct ctdb_req_call_old *c;

			c = (struct ctdb_req_call_old *)dcall->hdr;
			if (c->flags & CTDB_WANT_READONLY) {
				DLIST_ADD(delay_list, dcall);
			} else {
				DLIST_ADD(now_list, dcall);
			}
		} else {
			DLIST_ADD(delay_list, dcall);
		}
	}

	if (now_list != NULL) {
		tevent_add_timer(rev_hdl->ctdb->ev,
				 rev_hdl->ctdb_db,
				 tevent_timeval_current_ofs(0, 0),
				 deferred_call_requeue,
				 now_list);
	}

	if (delay_list != NULL) {
		tevent_add_timer(rev_hdl->ctdb->ev,
				 rev_hdl->ctdb_db,
				 tevent_timeval_current_ofs(1, 0),
				 deferred_call_requeue,
				 delay_list);
	}

	return 0;
}
static void revokechild_handler(struct tevent_context *ev,
				struct tevent_fd *fde,
				uint16_t flags, void *private_data)
{
	struct revokechild_handle *rev_hdl =
		talloc_get_type(private_data, struct revokechild_handle);
	int ret;
	char c;

	ret = sys_read(rev_hdl->fd[0], &c, 1);
	if (ret != 1) {
		DEBUG(DEBUG_ERR,("Failed to read status from revokechild. errno:%d\n", errno));
		rev_hdl->status = -1;
		talloc_free(rev_hdl);
		return;
	}
	if (c != 0) {
		DEBUG(DEBUG_ERR,("revokechild returned failure. status:%d\n", c));
		rev_hdl->status = -1;
		talloc_free(rev_hdl);
		return;
	}

	talloc_free(rev_hdl);
}
struct ctdb_revoke_state {
	struct ctdb_db_context *ctdb_db;
	TDB_DATA key;
	struct ctdb_ltdb_header *header;
	TDB_DATA data;
	uint32_t count;
	uint32_t status;
	uint32_t finished;
};
static void update_record_cb(struct ctdb_client_control_state *state)
{
	struct ctdb_revoke_state *revoke_state;
	int ret;
	int32_t res;

	if (state == NULL) {
		return;
	}
	revoke_state = state->async.private_data;

	state->async.fn = NULL;
	ret = ctdb_control_recv(state->ctdb, state, state, NULL, &res, NULL);
	if ((ret != 0) || (res != 0)) {
		DEBUG(DEBUG_ERR,("Recv for revoke update record failed ret:%d res:%d\n", ret, res));
		revoke_state->status = -1;
	}

	revoke_state->count--;
	if (revoke_state->count <= 0) {
		revoke_state->finished = 1;
	}
}
static void revoke_send_cb(struct ctdb_context *ctdb, uint32_t pnn, void *private_data)
{
	struct ctdb_revoke_state *revoke_state = private_data;
	struct ctdb_client_control_state *state;

	state = ctdb_ctrl_updaterecord_send(ctdb, revoke_state, timeval_current_ofs(ctdb->tunable.control_timeout,0), pnn, revoke_state->ctdb_db, revoke_state->key, revoke_state->header, revoke_state->data);
	if (state == NULL) {
		DEBUG(DEBUG_ERR,("Failure to send update record to revoke readonly delegation\n"));
		revoke_state->status = -1;
		return;
	}
	state->async.fn           = update_record_cb;
	state->async.private_data = revoke_state;

	revoke_state->count++;
}
static void ctdb_revoke_timeout_handler(struct tevent_context *ev,
					struct tevent_timer *te,
					struct timeval yt, void *private_data)
{
	struct ctdb_revoke_state *state = private_data;

	DEBUG(DEBUG_ERR,("Timed out waiting for revoke to finish\n"));
	state->finished = 1;
	state->status   = -1;
}
static int ctdb_revoke_all_delegations(struct ctdb_context *ctdb, struct ctdb_db_context *ctdb_db, TDB_DATA tdata, TDB_DATA key, struct ctdb_ltdb_header *header, TDB_DATA data)
{
	struct ctdb_revoke_state *state = talloc_zero(ctdb, struct ctdb_revoke_state);
	struct ctdb_ltdb_header new_header;
	TDB_DATA new_data;

	state->ctdb_db = ctdb_db;
	state->key     = key;
	state->header  = header;
	state->data    = data;

	ctdb_trackingdb_traverse(ctdb, tdata, revoke_send_cb, state);

	tevent_add_timer(ctdb->ev, state,
			 timeval_current_ofs(ctdb->tunable.control_timeout, 0),
			 ctdb_revoke_timeout_handler, state);

	while (state->finished == 0) {
		tevent_loop_once(ctdb->ev);
	}

	if (ctdb_ltdb_lock(ctdb_db, key) != 0) {
		DEBUG(DEBUG_ERR,("Failed to chainlock the database in revokechild\n"));
		talloc_free(state);
		return -1;
	}
	if (ctdb_ltdb_fetch(ctdb_db, key, &new_header, state, &new_data) != 0) {
		ctdb_ltdb_unlock(ctdb_db, key);
		DEBUG(DEBUG_ERR,("Failed for fetch tdb record in revokechild\n"));
		talloc_free(state);
		return -1;
	}
	header->rsn++;
	if (new_header.rsn > header->rsn) {
		ctdb_ltdb_unlock(ctdb_db, key);
		DEBUG(DEBUG_ERR,("RSN too high in tdb record in revokechild\n"));
		talloc_free(state);
		return -1;
	}
	if ( (new_header.flags & (CTDB_REC_RO_REVOKING_READONLY|CTDB_REC_RO_HAVE_DELEGATIONS)) != (CTDB_REC_RO_REVOKING_READONLY|CTDB_REC_RO_HAVE_DELEGATIONS) ) {
		ctdb_ltdb_unlock(ctdb_db, key);
		DEBUG(DEBUG_ERR,("Flags are wrong in tdb record in revokechild\n"));
		talloc_free(state);
		return -1;
	}

	/*
	 * If revoke on all nodes succeed, revoke is complete. Otherwise,
	 * remove CTDB_REC_RO_REVOKING_READONLY flag and retry.
	 */
	if (state->status == 0) {
		new_header.rsn++;
		new_header.flags |= CTDB_REC_RO_REVOKE_COMPLETE;
	} else {
		DEBUG(DEBUG_NOTICE, ("Revoke all delegations failed, retrying.\n"));
		new_header.flags &= ~CTDB_REC_RO_REVOKING_READONLY;
	}
	if (ctdb_ltdb_store(ctdb_db, key, &new_header, new_data) != 0) {
		ctdb_ltdb_unlock(ctdb_db, key);
		DEBUG(DEBUG_ERR,("Failed to write new record in revokechild\n"));
		talloc_free(state);
		return -1;
	}
	ctdb_ltdb_unlock(ctdb_db, key);

	talloc_free(state);
	return 0;
}
int ctdb_start_revoke_ro_record(struct ctdb_context *ctdb,
				struct ctdb_db_context *ctdb_db,
				TDB_DATA key,
				struct ctdb_ltdb_header *header,
				TDB_DATA data)
{
	TDB_DATA tdata;
	struct revokechild_handle *rev_hdl;
	pid_t parent = getpid();
	int ret;

	header->flags &= ~(CTDB_REC_RO_REVOKING_READONLY |
			   CTDB_REC_RO_HAVE_DELEGATIONS |
			   CTDB_REC_RO_HAVE_READONLY);

	header->flags |= CTDB_REC_FLAG_MIGRATED_WITH_DATA;
	header->rsn   += 1;

	rev_hdl = talloc_zero(ctdb_db, struct revokechild_handle);
	if (rev_hdl == NULL) {
		D_ERR("Failed to allocate revokechild_handle\n");
		return -1;
	}

	tdata = tdb_fetch(ctdb_db->rottdb, key);
	if (tdata.dsize > 0) {
		uint8_t *tmp;

		tmp = tdata.dptr;
		tdata.dptr = talloc_memdup(rev_hdl, tdata.dptr, tdata.dsize);
		free(tmp);
	}

	rev_hdl->status    = 0;
	rev_hdl->ctdb      = ctdb;
	rev_hdl->ctdb_db   = ctdb_db;
	rev_hdl->fd[0]     = -1;
	rev_hdl->fd[1]     = -1;

	rev_hdl->key.dsize = key.dsize;
	rev_hdl->key.dptr  = talloc_memdup(rev_hdl, key.dptr, key.dsize);
	if (rev_hdl->key.dptr == NULL) {
		D_ERR("Failed to allocate key for revokechild_handle\n");
		goto err_out;
	}

	ret = pipe(rev_hdl->fd);
	if (ret != 0) {
		D_ERR("Failed to allocate key for revokechild_handle\n");
		goto err_out;
	}

	rev_hdl->child = ctdb_fork(ctdb);
	if (rev_hdl->child == (pid_t)-1) {
		D_ERR("Failed to fork child for revokechild\n");
		goto err_out;
	}

	if (rev_hdl->child == 0) {
		char c = 0;

		close(rev_hdl->fd[0]);

		prctl_set_comment("ctdb_revokechild");
		if (switch_from_server_to_client(ctdb) != 0) {
			D_ERR("Failed to switch from server to client "
			      "for revokechild process\n");
			c = 1;
			goto child_finished;
		}

		c = ctdb_revoke_all_delegations(ctdb,
						ctdb_db,
						tdata,
						key,
						header,
						data);

child_finished:
		sys_write(rev_hdl->fd[1], &c, 1);
		ctdb_wait_for_process_to_exit(parent);
		_exit(0);
	}

	close(rev_hdl->fd[1]);
	rev_hdl->fd[1] = -1;
	set_close_on_exec(rev_hdl->fd[0]);

	rev_hdl->fde = tevent_add_fd(ctdb->ev,
				     rev_hdl,
				     rev_hdl->fd[0],
				     TEVENT_FD_READ,
				     revokechild_handler,
				     (void *)rev_hdl);

	if (rev_hdl->fde == NULL) {
		D_ERR("Failed to set up fd event for revokechild process\n");
		talloc_free(rev_hdl);
		return -1;
	}
	tevent_fd_set_auto_close(rev_hdl->fde);

	/* This is an active revokechild child process */
	DLIST_ADD_END(ctdb_db->revokechild_active, rev_hdl);
	talloc_set_destructor(rev_hdl, revokechild_destructor);

	return 0;
err_out:
	talloc_free(rev_hdl);
	return -1;
}
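
/*
 * The parent/child protocol used above is a single status byte over a
 * pipe: the child writes 0 on success and non-zero on failure, then
 * waits for the parent to exit.  The parent side, revokechild_handler(),
 * reads that byte (condensed):
 *
 *	ret = sys_read(rev_hdl->fd[0], &c, 1);
 *	if (ret != 1 || c != 0) {
 *		rev_hdl->status = -1;
 *	}
 *	talloc_free(rev_hdl);
 *
 * and freeing rev_hdl triggers revokechild_destructor(), which requeues
 * the deferred calls according to that status.
 */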
int ctdb_add_revoke_deferred_call(struct ctdb_context *ctdb, struct ctdb_db_context *ctdb_db, TDB_DATA key, struct ctdb_req_header *hdr, deferred_requeue_fn fn, void *call_context)
{
	struct revokechild_handle *rev_hdl;
	struct revokechild_deferred_call *deferred_call;

	for (rev_hdl = ctdb_db->revokechild_active;
	     rev_hdl;
	     rev_hdl = rev_hdl->next) {
		if (rev_hdl->key.dsize == 0) {
			continue;
		}
		if (rev_hdl->key.dsize != key.dsize) {
			continue;
		}
		if (!memcmp(rev_hdl->key.dptr, key.dptr, key.dsize)) {
			break;
		}
	}

	if (rev_hdl == NULL) {
		DEBUG(DEBUG_ERR,("Failed to add deferred call to revoke list. revoke structure not found\n"));
		return -1;
	}

	deferred_call = talloc(call_context, struct revokechild_deferred_call);
	if (deferred_call == NULL) {
		DEBUG(DEBUG_ERR,("Failed to allocate deferred call structure for revoking record\n"));
		return -1;
	}

	deferred_call->ctdb = ctdb;
	deferred_call->hdr  = talloc_steal(deferred_call, hdr);
	deferred_call->fn   = fn;
	deferred_call->ctx  = call_context;
	deferred_call->rev_hdl = rev_hdl;

	talloc_set_destructor(deferred_call, deferred_call_destructor);

	DLIST_ADD(rev_hdl->deferred_call_list, deferred_call);

	return 0;
}
static void ctdb_migration_count_handler(TDB_DATA key, uint64_t counter,
					 void *private_data)
{
	struct ctdb_db_context *ctdb_db = talloc_get_type_abort(
		private_data, struct ctdb_db_context);
	unsigned int value;

	value = (counter < INT_MAX ? counter : INT_MAX);
	ctdb_update_db_stat_hot_keys(ctdb_db, key, value);
}
static void ctdb_migration_cleandb_event(struct tevent_context *ev,
					 struct tevent_timer *te,
					 struct timeval current_time,
					 void *private_data)
{
	struct ctdb_db_context *ctdb_db = talloc_get_type_abort(
		private_data, struct ctdb_db_context);

	if (ctdb_db->migratedb == NULL) {
		return;
	}

	hash_count_expire(ctdb_db->migratedb, NULL);

	te = tevent_add_timer(ctdb_db->ctdb->ev, ctdb_db->migratedb,
			      tevent_timeval_current_ofs(10, 0),
			      ctdb_migration_cleandb_event, ctdb_db);
	if (te == NULL) {
		DEBUG(DEBUG_ERR,
		      ("Memory error in migration cleandb event for %s\n",
		       ctdb_db->db_name));
		TALLOC_FREE(ctdb_db->migratedb);
	}
}
int ctdb_migration_init(struct ctdb_db_context *ctdb_db)
{
	struct timeval one_second = { 1, 0 };
	struct tevent_timer *te;
	int ret;

	if (! ctdb_db_volatile(ctdb_db)) {
		return 0;
	}

	ret = hash_count_init(ctdb_db, one_second,
			      ctdb_migration_count_handler, ctdb_db,
			      &ctdb_db->migratedb);
	if (ret != 0) {
		DEBUG(DEBUG_ERR,
		      ("Memory error in migration init for %s\n",
		       ctdb_db->db_name));
		return -1;
	}

	te = tevent_add_timer(ctdb_db->ctdb->ev, ctdb_db->migratedb,
			      tevent_timeval_current_ofs(10, 0),
			      ctdb_migration_cleandb_event, ctdb_db);
	if (te == NULL) {
		DEBUG(DEBUG_ERR,
		      ("Memory error in migration init for %s\n",
		       ctdb_db->db_name));
		TALLOC_FREE(ctdb_db->migratedb);
		return -1;
	}

	return 0;
}