/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <asm/uaccess.h>
#include <asm/types.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>

#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
struct after_state_chg_work {
	struct drbd_work w;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
};

static DEFINE_MUTEX(drbd_main_mutex);
int drbdd_init(struct drbd_thread *);
int drbd_worker(struct drbd_thread *);
int drbd_asender(struct drbd_thread *);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static int drbd_release(struct gendisk *gd, fmode_t mode);
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags);
static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static void md_sync_timer_fn(unsigned long data);
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused);
static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused);
MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Maximum number of drbd devices (1-255)");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);

#include <linux/moduleparam.h>
/* allow_open_on_secondary */
MODULE_PARM_DESC(allow_oos, "DONT USE!");
/* thanks to these macros, if compiled into the kernel (not-module),
 * this becomes the boot parameter drbd.minor_count */
module_param(minor_count, uint, 0444);
module_param(disable_sendpage, bool, 0644);
module_param(allow_oos, bool, 0);
module_param(cn_idx, uint, 0444);
module_param(proc_details, int, 0644);
#ifdef CONFIG_DRBD_FAULT_INJECTION
int enable_faults;
int fault_rate;
static int fault_count;
int fault_devs;
/* bitmap of enabled faults */
module_param(enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param(fault_rate, int, 0664);
/* count of faults inserted */
module_param(fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param(fault_devs, int, 0644);
#endif
/* module parameter, defined */
unsigned int minor_count = 32;
int disable_sendpage;
int allow_oos;
unsigned int cn_idx = CN_IDX_DRBD;
int proc_details;	/* Detail level in proc drbd*/

/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char usermode_helper[80] = "/sbin/drbdadm";

module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;" */
struct drbd_conf **minor_table;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* epoch entries */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t *drbd_request_mempool;
mempool_t *drbd_ee_mempool;
/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page. */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);
static const struct block_device_operations drbd_ops = {
	.owner   = THIS_MODULE,
	.open    = drbd_open,
	.release = drbd_release,
};

#define ARRY_SIZE(A) (sizeof(A)/sizeof(A[0]))
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real function, sparse works. */
int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&mdev->local_cnt);
	io_allowed = (mdev->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&mdev->local_cnt))
			wake_up(&mdev->misc_wait);
	}
	return io_allowed;
}
/**
 * DOC: The transfer log
 *
 * The transfer log is a single linked list of &struct drbd_tl_epoch objects.
 * mdev->newest_tle points to the head, mdev->oldest_tle points to the tail
 * of the list. There is always at least one &struct drbd_tl_epoch object.
 *
 * Each &struct drbd_tl_epoch has a circular double linked list of requests
 * attached.
 */
static int tl_init(struct drbd_conf *mdev)
{
	struct drbd_tl_epoch *b;

	/* during device minor initialization, we may well use GFP_KERNEL */
	b = kmalloc(sizeof(struct drbd_tl_epoch), GFP_KERNEL);
	if (!b)
		return 0;
	INIT_LIST_HEAD(&b->requests);
	INIT_LIST_HEAD(&b->w.list);
	b->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	mdev->oldest_tle = b;
	mdev->newest_tle = b;
	INIT_LIST_HEAD(&mdev->out_of_sequence_requests);

	mdev->tl_hash = NULL;

	return 1;
}
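
/*
 * Illustrative sketch (not part of the driver): after tl_init() there is a
 * single epoch, so oldest_tle and newest_tle point to the same object.
 * _tl_add_barrier() below appends new epochs via the ->next pointer:
 *
 *	oldest_tle -> [epoch] -> [epoch] -> ... -> [epoch] <- newest_tle
 *
 * where each epoch carries the ring list of requests issued before the
 * corresponding write barrier, as described in the DOC comment above.
 */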
static void tl_cleanup(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->oldest_tle == mdev->newest_tle);
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));
	kfree(mdev->oldest_tle);
	mdev->oldest_tle = NULL;
	kfree(mdev->unused_spare_tle);
	mdev->unused_spare_tle = NULL;
	kfree(mdev->tl_hash);
	mdev->tl_hash = NULL;
}
/**
 * _tl_add_barrier() - Adds a barrier to the transfer log
 * @mdev:	DRBD device.
 * @new:	Barrier to be added before the current head of the TL.
 *
 * The caller must hold the req_lock.
 */
void _tl_add_barrier(struct drbd_conf *mdev, struct drbd_tl_epoch *new)
{
	struct drbd_tl_epoch *newest_before;

	INIT_LIST_HEAD(&new->requests);
	INIT_LIST_HEAD(&new->w.list);
	new->w.cb = NULL; /* if this is != NULL, we need to dec_ap_pending in tl_clear */

	newest_before = mdev->newest_tle;
	/* never send a barrier number == 0, because that is special-cased
	 * when using TCQ for our write ordering code */
	new->br_number = (newest_before->br_number+1) ?: 1;
	if (mdev->newest_tle != new) {
		mdev->newest_tle->next = new;
		mdev->newest_tle = new;
	}
}
/**
 * tl_release() - Free or recycle the oldest &struct drbd_tl_epoch object of the TL
 * @mdev:	DRBD device.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * &struct drbd_tl_epoch objects this function will cause a termination
 * of the connection.
 */
void tl_release(struct drbd_conf *mdev, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_tl_epoch *b, *nob; /* next old barrier */
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	b = mdev->oldest_tle;

	/* first some paranoia code */
	if (b == NULL) {
		dev_err(DEV, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			barrier_nr);
		goto bail;
	}
	if (b->br_number != barrier_nr) {
		dev_err(DEV, "BAD! BarrierAck #%u received, expected #%u!\n",
			barrier_nr, b->br_number);
		goto bail;
	}
	if (b->n_writes != set_size) {
		dev_err(DEV, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			barrier_nr, set_size, b->n_writes);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch */
	list_for_each_safe(le, tle, &b->requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		_req_mod(r, barrier_acked);
	}
	/* There could be requests on the list waiting for completion
	   of the write to the local disk. To avoid corruptions of
	   slab's data structures we have to remove the lists head.

	   Also there could have been a barrier ack out of sequence, overtaking
	   the write acks - which would be a bug and violating write ordering.
	   To not deadlock in case we lose connection while such requests are
	   still pending, we need some way to find them for the
	   _req_mode(connection_lost_while_pending).

	   These have been list_move'd to the out_of_sequence_requests list in
	   _req_mod(, barrier_acked) above.
	   */
	list_del_init(&b->requests);

	nob = b->next;
	if (test_and_clear_bit(CREATE_BARRIER, &mdev->flags)) {
		_tl_add_barrier(mdev, b);
		if (nob)
			mdev->oldest_tle = nob;
		/* if nob == NULL b was the only barrier, and becomes the new
		   barrier. Therefore mdev->oldest_tle points already to b */
	} else {
		D_ASSERT(nob != NULL);
		mdev->oldest_tle = nob;
		kfree(b);
	}

	spin_unlock_irq(&mdev->req_lock);
	dec_ap_pending(mdev);

	return;

bail:
	spin_unlock_irq(&mdev->req_lock);
	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
}
/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @mdev:	DRBD device.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of connection_lost_while_pending, resend, fail_frozen_disk_io,
 * restart_frozen_disk_io.
 */
static void _tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	struct drbd_tl_epoch *b, *tmp, **pn;
	struct list_head *le, *tle, carry_reads;
	struct drbd_request *req;
	int rv, n_writes, n_reads;

	b = mdev->oldest_tle;
	pn = &mdev->oldest_tle;
	while (b) {
		n_writes = 0;
		n_reads = 0;
		INIT_LIST_HEAD(&carry_reads);
		list_for_each_safe(le, tle, &b->requests) {
			req = list_entry(le, struct drbd_request, tl_requests);
			rv = _req_mod(req, what);

			n_writes += (rv & MR_WRITE) >> MR_WRITE_SHIFT;
			n_reads  += (rv & MR_READ) >> MR_READ_SHIFT;
		}
		tmp = b->next;

		if (n_writes) {
			if (what == resend) {
				b->n_writes = n_writes;
				if (b->w.cb == NULL) {
					b->w.cb = w_send_barrier;
					inc_ap_pending(mdev);
					set_bit(CREATE_BARRIER, &mdev->flags);
				}

				drbd_queue_work(&mdev->data.work, &b->w);
			}
			pn = &b->next;
		} else {
			if (n_reads)
				list_add(&carry_reads, &b->requests);
			/* there could still be requests on that ring list,
			 * in case local io is still pending */
			list_del(&b->requests);

			/* dec_ap_pending corresponding to queue_barrier.
			 * the newest barrier may not have been queued yet,
			 * in which case w.cb is still NULL. */
			if (b->w.cb != NULL)
				dec_ap_pending(mdev);

			if (b == mdev->newest_tle) {
				/* recycle, but reinit! */
				D_ASSERT(tmp == NULL);
				INIT_LIST_HEAD(&b->requests);
				list_splice(&carry_reads, &b->requests);
				INIT_LIST_HEAD(&b->w.list);
				b->w.cb = NULL;
				b->br_number = net_random();
				b->n_writes = 0;

				*pn = b;
				break;
			}
			*pn = tmp;
			kfree(b);
		}
		b = tmp;
		list_splice(&carry_reads, &b->requests);
	}
}
/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @mdev:	DRBD device.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_conf *mdev)
{
	struct list_head *le, *tle;
	struct drbd_request *r;

	spin_lock_irq(&mdev->req_lock);

	_tl_restart(mdev, connection_lost_while_pending);

	/* we expect this list to be empty. */
	D_ASSERT(list_empty(&mdev->out_of_sequence_requests));

	/* but just in case, clean it up anyways! */
	list_for_each_safe(le, tle, &mdev->out_of_sequence_requests) {
		r = list_entry(le, struct drbd_request, tl_requests);
		/* It would be nice to complete outside of spinlock.
		 * But this is easier for now. */
		_req_mod(r, connection_lost_while_pending);
	}

	/* ensure bit indicating barrier is required is clear */
	clear_bit(CREATE_BARRIER, &mdev->flags);

	memset(mdev->app_reads_hash, 0, APP_R_HSIZE*sizeof(void *));

	spin_unlock_irq(&mdev->req_lock);
}

void tl_restart(struct drbd_conf *mdev, enum drbd_req_event what)
{
	spin_lock_irq(&mdev->req_lock);
	_tl_restart(mdev, what);
	spin_unlock_irq(&mdev->req_lock);
}
/**
 * cl_wide_st_chg() - TRUE if the state change is a cluster wide one
 * @mdev:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_conf *mdev,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S);
}
int drbd_change_state(struct drbd_conf *mdev, enum chg_state_flags f,
		      union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state os, ns;
	int rv;

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	rv = _drbd_set_state(mdev, ns, f, NULL);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(mdev, CS_HARD, mask, val);
}
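
/*
 * Usage sketch (illustrative): callers describe a change as a mask/value
 * pair over union drbd_state; the NS() helper used throughout this file
 * builds such a pair for a single field, e.g. the forced disconnect in
 * tl_release() above:
 *
 *	drbd_force_state(mdev, NS(conn, C_PROTOCOL_ERROR));
 *
 * which selects only the conn field in the mask and C_PROTOCOL_ERROR as the
 * value, applied here as ns.i = (os.i & ~mask.i) | val.i.
 */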
static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns);
static int is_valid_state_transition(struct drbd_conf *,
				     union drbd_state, union drbd_state);
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, const char **warn_sync_abort);
int drbd_send_state_req(struct drbd_conf *,
			union drbd_state, union drbd_state);

static enum drbd_state_ret_codes _req_st_cond(struct drbd_conf *mdev,
					      union drbd_state mask, union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	int rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &mdev->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &mdev->flags))
		return SS_CW_FAILED_BY_PEER;

	rv = 0;
	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (!cl_wide_st_chg(mdev, os, ns))
		rv = SS_CW_NO_NEED;
	if (!rv) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS) {
			rv = is_valid_state_transition(mdev, ns, os);
			if (rv == SS_SUCCESS)
				rv = 0; /* cont waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	return rv;
}
/**
 * drbd_req_state() - Perform an eventually cluster wide state change
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static int drbd_req_state(struct drbd_conf *mdev,
			  union drbd_state mask, union drbd_state val,
			  enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	int rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(&mdev->state_mutex);

	spin_lock_irqsave(&mdev->req_lock, flags);
	os = mdev->state;
	ns.i = (os.i & ~mask.i) | val.i;
	ns = sanitize_state(mdev, os, ns, NULL);

	if (cl_wide_st_chg(mdev, os, ns)) {
		rv = is_valid_state(mdev, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_state_transition(mdev, ns, os);
		spin_unlock_irqrestore(&mdev->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		drbd_state_lock(mdev);
		if (!drbd_send_state_req(mdev, mask, val)) {
			drbd_state_unlock(mdev);
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}

		wait_event(mdev->state_wait,
			(rv = _req_st_cond(mdev, mask, val)));

		if (rv < SS_SUCCESS) {
			drbd_state_unlock(mdev);
			if (f & CS_VERBOSE)
				print_st_err(mdev, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&mdev->req_lock, flags);
		os = mdev->state;
		ns.i = (os.i & ~mask.i) | val.i;
		rv = _drbd_set_state(mdev, ns, f, &done);
		drbd_state_unlock(mdev);
	} else {
		rv = _drbd_set_state(mdev, ns, f, &done);
	}

	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(current != mdev->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(&mdev->state_mutex);

	return rv;
}
/**
 * _drbd_request_state() - Request a state change (with flags)
 * @mdev:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
int _drbd_request_state(struct drbd_conf *mdev, union drbd_state mask,
			union drbd_state val, enum chg_state_flags f)
{
	int rv;

	wait_event(mdev->state_wait,
		   (rv = drbd_req_state(mdev, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}
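
/*
 * Rough flow of a cluster wide change as implemented above (sketch):
 *
 *	drbd_req_state()
 *	  -> cl_wide_st_chg()?          local only: just _drbd_set_state()
 *	  -> drbd_send_state_req()      ask the peer
 *	  -> wait_event(_req_st_cond)   peer sets CL_ST_CHG_SUCCESS/_FAIL
 *	  -> _drbd_set_state()          commit locally, queue after_state_ch()
 *
 * _drbd_request_state() simply retries while the engine reports
 * SS_IN_TRANSIENT_STATE.
 */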
static void print_st(struct drbd_conf *mdev, char *name, union drbd_state ns)
{
	dev_err(DEV, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-'
	    );
}

void print_st_err(struct drbd_conf *mdev,
	union drbd_state os, union drbd_state ns, int err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	dev_err(DEV, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(mdev, " state", os);
	print_st(mdev, "wanted", ns);
}


#define drbd_peer_str drbd_role_str
#define drbd_pdsk_str drbd_disk_str

#define drbd_susp_str(A)     ((A) ? "1" : "0")
#define drbd_aftr_isp_str(A) ((A) ? "1" : "0")
#define drbd_peer_isp_str(A) ((A) ? "1" : "0")
#define drbd_user_isp_str(A) ((A) ? "1" : "0")

#define PSC(A) \
	({ if (ns.A != os.A) { \
		pbp += sprintf(pbp, #A "( %s -> %s ) ", \
			      drbd_##A##_str(os.A), \
			      drbd_##A##_str(ns.A)); \
	} })
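
/*
 * Example of a resulting log line (illustrative values only):
 *
 *	 state = { cs:Connected ro:Primary/Secondary ds:UpToDate/UpToDate r--- }
 *
 * matching the " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c }" format used by
 * print_st() above.
 */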
/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @mdev:	DRBD device.
 * @ns:		State to consider.
 */
static int is_valid_state(struct drbd_conf *mdev, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	int rv = SS_SUCCESS;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	if (get_net_conf(mdev)) {
		if (!mdev->net_conf->two_primaries &&
		    ns.role == R_PRIMARY && ns.peer == R_PRIMARY)
			rv = SS_TWO_PRIMARIES;
		put_net_conf(mdev);
	}

	if (rv <= 0)
		/* already found a reason to abort */;
	else if (ns.role == R_SECONDARY && mdev->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (mdev->sync_conf.verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  mdev->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	return rv;
}
/**
 * is_valid_state_transition() - Returns an SS_ error code if the state transition is not possible
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @os:		old state.
 */
static int is_valid_state_transition(struct drbd_conf *mdev,
				     union drbd_state ns, union drbd_state os)
{
	int rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	if (ns.conn == os.conn && ns.conn == C_WF_REPORT_PARAMS)
		rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	return rv;
}
/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @warn_sync_abort:
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state os,
				       union drbd_state ns, const char **warn_sync_abort)
{
	enum drbd_fencing_p fp;

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Disallow Network errors to configure a device's network part */
	if ((ns.conn >= C_TIMEOUT && ns.conn <= C_TEAR_DOWN) &&
	    os.conn <= C_DISCONNECTING)
		ns.conn = os.conn;

	/* After a network error (+C_TEAR_DOWN) only C_UNCONNECTED or C_DISCONNECTING can follow.
	 * If you try to go into some Sync* state, that shall fail (elsewhere). */
	if (os.conn >= C_TIMEOUT && os.conn <= C_TEAR_DOWN &&
	    ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
		ns.conn = os.conn;

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		ns.disk = D_DISKLESS;

	/* if we are only D_ATTACHING yet,
	 * we can (and should) go directly to D_DISKLESS. */
	if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
		ns.disk = D_DISKLESS;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
		ns.conn = os.conn;

	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* Abort resync if a disk fails/detaches */
	if (os.conn > C_CONNECTED && ns.conn > C_CONNECTED &&
	    (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn_sync_abort)
			*warn_sync_abort =
				os.conn == C_VERIFY_S || os.conn == C_VERIFY_T ?
				"Online-verify" : "Resync";
		ns.conn = C_CONNECTED;
	}

	if (ns.conn >= C_CONNECTED &&
	    ((ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED) ||
	     (ns.disk == D_NEGOTIATING && ns.conn == C_WF_BITMAP_T))) {
		switch (ns.conn) {
		case C_WF_BITMAP_T:
		case C_PAUSED_SYNC_T:
			ns.disk = D_OUTDATED;
			break;
		case C_CONNECTED:
		case C_WF_BITMAP_S:
		case C_SYNC_SOURCE:
		case C_PAUSED_SYNC_S:
			ns.disk = D_UP_TO_DATE;
			break;
		case C_SYNC_TARGET:
			ns.disk = D_INCONSISTENT;
			dev_warn(DEV, "Implicitly set disk state Inconsistent!\n");
			break;
		}
		if (os.disk == D_OUTDATED && ns.disk == D_UP_TO_DATE)
			dev_warn(DEV, "Implicitly set disk from Outdated to UpToDate\n");
	}

	if (ns.conn >= C_CONNECTED &&
	    (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)) {
		switch (ns.conn) {
		case C_CONNECTED:
		case C_WF_BITMAP_T:
		case C_PAUSED_SYNC_T:
		case C_SYNC_TARGET:
			ns.pdsk = D_UP_TO_DATE;
			break;
		case C_WF_BITMAP_S:
		case C_PAUSED_SYNC_S:
			/* remap any consistent state to D_OUTDATED,
			 * but disallow "upgrade" of not even consistent states.
			 */
			ns.pdsk =
				(D_DISKLESS < os.pdsk && os.pdsk < D_OUTDATED)
				? os.pdsk : D_OUTDATED;
			break;
		case C_SYNC_SOURCE:
			ns.pdsk = D_INCONSISTENT;
			dev_warn(DEV, "Implicitly set pdsk Inconsistent!\n");
			break;
		}
		if (os.pdsk == D_OUTDATED && ns.pdsk == D_UP_TO_DATE)
			dev_warn(DEV, "Implicitly set pdsk from Outdated to UpToDate\n");
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(mdev, D_NEGOTIATING)) {
		if (mdev->ed_uuid == mdev->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = mdev->new_state_tmp.disk;
			ns.pdsk = mdev->new_state_tmp.pdsk;
		} else {
			dev_alert(DEV, "Connection lost while negotiating, no data!\n");
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(mdev);
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (mdev->sync_conf.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}
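
/*
 * Example of the kind of rule applied above (illustrative): if a resync is
 * running (os.conn > C_CONNECTED) and the local or peer disk drops to
 * D_FAILED or below, sanitize_state() pulls ns.conn back to C_CONNECTED and
 * reports "Resync" (or "Online-verify") through *warn_sync_abort, which
 * __drbd_set_state() below logs as "... aborted.".
 */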
/* helper for __drbd_set_state */
static void set_ov_position(struct drbd_conf *mdev, enum drbd_conns cs)
{
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		mdev->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(mdev->ov_start_sector);
		if (bit >= mdev->rs_total)
			mdev->ov_start_sector =
				BM_BIT_TO_SECT(mdev->rs_total - 1);
		mdev->ov_position = mdev->ov_start_sector;
	}
}

static void drbd_resume_al(struct drbd_conf *mdev)
{
	if (test_and_clear_bit(AL_SUSPENDED, &mdev->flags))
		dev_info(DEV, "Resumed AL updates\n");
}
/**
 * __drbd_set_state() - Set a new DRBD state
 * @mdev:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion, that will get completed after the after_state_ch() finished
 *
 * Caller needs to hold req_lock, and global_state_lock. Do not call directly.
 */
int __drbd_set_state(struct drbd_conf *mdev,
		    union drbd_state ns, enum chg_state_flags flags,
		    struct completion *done)
{
	union drbd_state os;
	int rv = SS_SUCCESS;
	const char *warn_sync_abort = NULL;
	struct after_state_chg_work *ascw;

	os = mdev->state;

	ns = sanitize_state(mdev, os, ns, &warn_sync_abort);

	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	if (!(flags & CS_HARD)) {
		/*  pre-state-change checks ; only look at ns  */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(mdev, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen...*/

			if (is_valid_state(mdev, os) == rv)
				rv = is_valid_state_transition(mdev, ns, os);
		} else
			rv = is_valid_state_transition(mdev, ns, os);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(mdev, os, ns, rv);
		return rv;
	}

	if (warn_sync_abort)
		dev_warn(DEV, "%s aborted.\n", warn_sync_abort);

	{
	char *pbp, pb[300];
	pbp = pb;
	*pbp = 0;
	PSC(role);
	PSC(peer);
	PSC(conn);
	PSC(disk);
	PSC(pdsk);
	if (is_susp(ns) != is_susp(os))
		pbp += sprintf(pbp, "susp( %s -> %s ) ",
			       drbd_susp_str(is_susp(os)),
			       drbd_susp_str(is_susp(ns)));
	PSC(aftr_isp);
	PSC(peer_isp);
	PSC(user_isp);
	dev_info(DEV, "%s\n", pb);
	}

	/* solve the race between becoming unconfigured,
	 * worker doing the cleanup, and
	 * admin reconfiguring us:
	 * on (re)configure, first set CONFIG_PENDING,
	 * then wait for a potentially exiting worker,
	 * start the worker, and schedule one no_op.
	 * then proceed with configuration.
	 */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY &&
	    !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
		set_bit(DEVICE_DYING, &mdev->flags);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&mdev->local_cnt);

	mdev->state = ns;
	wake_up(&mdev->misc_wait);
	wake_up(&mdev->state_wait);

	/* aborted verify run. log the last position */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn < C_CONNECTED) {
		mdev->ov_start_sector =
			BM_BIT_TO_SECT(mdev->rs_total - mdev->ov_left);
		dev_info(DEV, "Online Verify reached sector %llu\n",
			(unsigned long long)mdev->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		dev_info(DEV, "Syncer continues.\n");
		mdev->rs_paused += (long)jiffies
				  -(long)mdev->rs_mark_time[mdev->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		dev_info(DEV, "Resync suspended\n");
		mdev->rs_mark_time[mdev->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		mdev->ov_position = 0;
		mdev->rs_total = drbd_bm_bits(mdev);
		if (mdev->agreed_pro_version >= 90)
			set_ov_position(mdev, ns.conn);
		else
			mdev->ov_start_sector = 0;
		mdev->ov_left = mdev->rs_total
			      - BM_SECT_TO_BIT(mdev->ov_position);
		mdev->rs_start = now;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->ov_last_oos_size = 0;
		mdev->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = mdev->rs_total;
			mdev->rs_mark_time[i] = now;
		}

		if (ns.conn == C_VERIFY_S) {
			dev_info(DEV, "Starting Online Verify from sector %llu\n",
					(unsigned long long)mdev->ov_position);
			mod_timer(&mdev->resync_timer, jiffies);
		}
	}

	if (get_ldev(mdev)) {
		u32 mdf = mdev->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		if (test_bit(CRASHED_PRIMARY, &mdev->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (mdev->state.role == R_PRIMARY ||
		    (mdev->state.pdsk < D_INCONSISTENT && mdev->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (mdev->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (mdev->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (mdev->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (mdev->state.pdsk <= D_OUTDATED && mdev->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != mdev->ldev->md.flags) {
			mdev->ldev->md.flags = mdf;
			drbd_md_mark_dirty(mdev);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(mdev, mdev->ldev->md.uuid[UI_CURRENT]);
		put_ldev(mdev);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &mdev->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&mdev->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_TEAR_DOWN &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&mdev->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED)
		drbd_resume_al(mdev);

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->done = done;
		drbd_queue_work(&mdev->data.work, &ascw->w);
	} else {
		dev_warn(DEV, "Could not kmalloc an ascw\n");
	}

	return rv;
}
static int w_after_state_ch(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	after_state_ch(mdev, ascw->os, ascw->ns, ascw->flags);
	if (ascw->flags & CS_WAIT_COMPLETE) {
		D_ASSERT(ascw->done != NULL);
		complete(ascw->done);
	}
	kfree(ascw);

	return 1;
}
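
/*
 * Sketch of the deferred part of a state change: __drbd_set_state() commits
 * the new state under req_lock and queues an after_state_chg_work; the
 * worker later runs w_after_state_ch() -> after_state_ch(), which may sleep.
 * A caller passing CS_WAIT_COMPLETE blocks on ascw->done until that work has
 * run (see drbd_req_state() above).
 */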
static void abw_start_sync(struct drbd_conf *mdev, int rv)
{
	if (rv) {
		dev_err(DEV, "Writing the bitmap failed not starting resync.\n");
		_drbd_request_state(mdev, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (mdev->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(mdev, C_SYNC_SOURCE);
		break;
	}
}
/**
 * after_state_ch() - Perform after state change actions that may sleep
 * @mdev:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @flags:	Flags
 */
static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags)
{
	enum drbd_fencing_p fp;
	enum drbd_req_event what = nothing;
	union drbd_state nsm = (union drbd_state){ .i = -1 };

	if (os.conn != C_CONNECTED && ns.conn == C_CONNECTED) {
		clear_bit(CRASHED_PRIMARY, &mdev->flags);
		if (mdev->p_uuid)
			mdev->p_uuid[UI_FLAGS] &= ~((u64)2);
	}

	fp = FP_DONT_CARE;
	if (get_ldev(mdev)) {
		fp = mdev->ldev->dc.fencing;
		put_ldev(mdev);
	}

	/* Inform userspace about the change... */
	drbd_bcast_state(mdev, ns);

	if (!(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE) &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
		drbd_khelper(mdev, "pri-on-incon-degr");

	/* Here we have the actions that are performed after a
	   state change. This function might sleep */

	if (ns.susp_nod) {
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			if (ns.conn == C_CONNECTED)
				what = resend, nsm.susp_nod = 0;
			else /* ns.conn > C_CONNECTED */
				dev_err(DEV, "Unexpected Resync going on!\n");
		}

		if (os.disk == D_ATTACHING && ns.disk > D_ATTACHING)
			what = restart_frozen_disk_io, nsm.susp_nod = 0;
	}

	if (ns.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (os.pdsk > D_OUTDATED && ns.pdsk <= D_OUTDATED) {
			tl_clear(mdev);
			if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
				drbd_uuid_new_current(mdev);
				clear_bit(NEW_CUR_UUID, &mdev->flags);
			}
			spin_lock_irq(&mdev->req_lock);
			_drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->req_lock);
		}
		/* case2: The connection was established again: */
		if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
			clear_bit(NEW_CUR_UUID, &mdev->flags);
			what = resend;
			nsm.susp_fen = 0;
		}
	}

	if (what != nothing) {
		spin_lock_irq(&mdev->req_lock);
		_tl_restart(mdev, what);
		nsm.i &= mdev->state.i;
		_drbd_set_state(mdev, nsm, CS_VERBOSE, NULL);
		spin_unlock_irq(&mdev->req_lock);
	}

	/* Do not change the order of the if above and the two below... */
	if (os.pdsk == D_DISKLESS && ns.pdsk > D_DISKLESS) { /* attach on the peer */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}
	if (os.conn != C_WF_BITMAP_S && ns.conn == C_WF_BITMAP_S)
		drbd_queue_bitmap_io(mdev, &drbd_send_bitmap, NULL, "send_bitmap (WFBitMapS)");

	/* Lost contact to peer's copy of the data */
	if ((os.pdsk >= D_INCONSISTENT &&
	     os.pdsk != D_UNKNOWN &&
	     os.pdsk != D_OUTDATED)
	&&  (ns.pdsk < D_INCONSISTENT ||
	     ns.pdsk == D_UNKNOWN ||
	     ns.pdsk == D_OUTDATED)) {
		if (get_ldev(mdev)) {
			if ((ns.role == R_PRIMARY || ns.peer == R_PRIMARY) &&
			    mdev->ldev->md.uuid[UI_BITMAP] == 0 && ns.disk >= D_UP_TO_DATE) {
				if (is_susp(mdev->state)) {
					set_bit(NEW_CUR_UUID, &mdev->flags);
				} else {
					drbd_uuid_new_current(mdev);
					drbd_send_uuids(mdev);
				}
			}
			put_ldev(mdev);
		}
	}

	if (ns.pdsk < D_INCONSISTENT && get_ldev(mdev)) {
		if (ns.peer == R_PRIMARY && mdev->ldev->md.uuid[UI_BITMAP] == 0) {
			drbd_uuid_new_current(mdev);
			drbd_send_uuids(mdev);
		}

		/* D_DISKLESS Peer becomes secondary */
		if (os.peer == R_PRIMARY && ns.peer == R_SECONDARY)
			drbd_al_to_on_disk_bm(mdev);
		put_ldev(mdev);
	}

	/* Last part of the attaching process ... */
	if (ns.conn >= C_CONNECTED &&
	    os.disk == D_ATTACHING && ns.disk == D_NEGOTIATING) {
		drbd_send_sizes(mdev, 0, 0);  /* to start sync... */
		drbd_send_uuids(mdev);
		drbd_send_state(mdev);
	}

	/* We want to pause/continue resync, tell peer. */
	if (ns.conn >= C_CONNECTED &&
	     ((os.aftr_isp != ns.aftr_isp) ||
	      (os.user_isp != ns.user_isp)))
		drbd_send_state(mdev);

	/* In case one of the isp bits got set, suspend other devices. */
	if ((!os.aftr_isp && !os.peer_isp && !os.user_isp) &&
	    (ns.aftr_isp || ns.peer_isp || ns.user_isp))
		suspend_other_sg(mdev);

	/* Make sure the peer gets informed about eventual state
	   changes (ISP bits) while we were in WFReportParams. */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn >= C_CONNECTED)
		drbd_send_state(mdev);

	/* We are in the progress to start a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, &abw_start_sync, "set_n_write from StartingSync");

	/* We are invalidating ourselves... */
	if (os.conn < C_CONNECTED && ns.conn < C_CONNECTED &&
	    os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
		drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");

	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh;
		int was_io_error;

		/* corresponding get_ldev was in __drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS,
		 * so it is safe to dereference ldev here. */
		eh = mdev->ldev->dc.on_io_error;
		was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);

		/* current state still has to be D_FAILED,
		 * there is only one way out: to D_DISKLESS,
		 * and that may only happen after our put_ldev below. */
		if (mdev->state.disk != D_FAILED)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s during detach\n",
				drbd_disk_str(mdev->state.disk));

		if (drbd_send_state(mdev))
			dev_warn(DEV, "Notified peer that I am detaching my disk\n");
		else
			dev_err(DEV, "Sending state for detaching disk failed\n");

		drbd_rs_cancel_all(mdev);

		/* In case we want to get something to stable storage still,
		 * this may be the last chance.
		 * Following put_ldev may transition to D_DISKLESS. */
		drbd_md_sync(mdev);
		put_ldev(mdev);

		if (was_io_error && eh == EP_CALL_HELPER)
			drbd_khelper(mdev, "local-io-error");
	}

	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		if (mdev->state.disk != D_DISKLESS)
			dev_err(DEV,
				"ASSERT FAILED: disk is %s while going diskless\n",
				drbd_disk_str(mdev->state.disk));

		mdev->rs_failed = 0;
		atomic_set(&mdev->rs_pending_cnt, 0);

		if (drbd_send_state(mdev))
			dev_warn(DEV, "Notified peer that I'm now diskless.\n");
		else
			dev_err(DEV, "Sending state for being diskless failed\n");
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */
		put_ldev(mdev);
	}

	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &mdev->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(mdev);
	}

	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(mdev);

	/* sync target done with resync. Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(mdev);

	/* free tl_hash if we Got thawed and are C_STANDALONE */
	if (ns.conn == C_STANDALONE && !is_susp(ns) && mdev->tl_hash)
		drbd_free_tl_hash(mdev);

	/* Upon network connection, we need to start the receiver */
	if (os.conn == C_STANDALONE && ns.conn == C_UNCONNECTED)
		drbd_thread_start(&mdev->receiver);

	/* Terminate worker thread if we are unconfigured - it will be
	   restarted as needed... */
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(mdev);
		/* set in __drbd_set_state, unless CONFIG_PENDING was set */
		if (test_bit(DEVICE_DYING, &mdev->flags))
			drbd_thread_stop_nowait(&mdev->worker);
	}
}
static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_conf *mdev = thi->mdev;
	unsigned long flags;
	int retval;

restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "Exiting", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "Restarting" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees Exiting, and can remap to Restarting,
	 * or thread_start see None, and can proceed as normal.
	 */

	if (thi->t_state == Restarting) {
		dev_info(DEV, "Restarting %s\n", current->comm);
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = None;
	complete(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	dev_info(DEV, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */
	module_put(THIS_MODULE);
	return retval;
}
static void drbd_thread_init(struct drbd_conf *mdev, struct drbd_thread *thi,
		      int (*func) (struct drbd_thread *))
{
	spin_lock_init(&thi->t_lock);
	thi->task    = NULL;
	thi->t_state = None;
	thi->function = func;
	thi->mdev = mdev;
}
int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct task_struct *nt;
	unsigned long flags;

	const char *me =
		thi == &mdev->receiver ? "receiver" :
		thi == &mdev->asender  ? "asender"  :
		thi == &mdev->worker   ? "worker"   : "NONSENSE";

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case None:
		dev_info(DEV, "Starting %s thread (from %s [%d])\n",
				me, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			dev_err(DEV, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return FALSE;
		}

		init_completion(&thi->stop);
		D_ASSERT(thi->task == NULL);
		thi->reset_cpu_mask = 1;
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd%d_%s", mdev_to_minor(mdev), me);

		if (IS_ERR(nt)) {
			dev_err(DEV, "Couldn't start thread\n");

			module_put(THIS_MODULE);
			return FALSE;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = Running;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case Exiting:
		thi->t_state = Restarting;
		dev_info(DEV, "Restarting %s thread (from %s [%d])\n",
				me, current->comm, current->pid);
		/* fall through */
	case Running:
	case Restarting:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return TRUE;
}
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? Restarting : Exiting;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == None) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}
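
/*
 * Thread state machine used above (sketch):
 *
 *	None --drbd_thread_start()--> Running
 *	Running --_drbd_thread_stop()--> Exiting (or Restarting)
 *	Exiting --drbd_thread_start()--> Restarting
 *	Restarting --drbd_thread_setup()--> Running (function re-entered)
 *
 * All transitions happen under thi->t_lock so that start/stop racing with
 * the exiting thread observe a consistent t_state.
 */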
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 * @mdev:	DRBD device.
 *
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
void drbd_calc_cpu_mask(struct drbd_conf *mdev)
{
	int ord, cpu;

	/* user override. */
	if (cpumask_weight(mdev->cpu_mask))
		return;

	ord = mdev_to_minor(mdev) % cpumask_weight(cpu_online_mask);
	for_each_online_cpu(cpu) {
		if (ord-- == 0) {
			cpumask_set_cpu(cpu, mdev->cpu_mask);
			return;
		}
	}
	/* should not be reached */
	cpumask_setall(mdev->cpu_mask);
}
/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @mdev:	DRBD device.
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_conf *mdev)
{
	struct task_struct *p = current;
	struct drbd_thread *thi =
		p == mdev->asender.task  ? &mdev->asender  :
		p == mdev->receiver.task ? &mdev->receiver :
		p == mdev->worker.task   ? &mdev->worker   :
		NULL;
	ERR_IF(thi == NULL)
		return;
	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, mdev->cpu_mask);
}
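
/*
 * Example (illustrative): with 4 CPUs online, minor 6 yields
 * ord = 6 % 4 = 2, so drbd_calc_cpu_mask() pins all threads of that device
 * to the third online CPU; a non-empty cpu_mask configured by the user wins
 * instead.
 */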
/* the appropriate socket mutex must be held already */
int _drbd_send_cmd(struct drbd_conf *mdev, struct socket *sock,
			  enum drbd_packets cmd, struct p_header80 *h,
			  size_t size, unsigned msg_flags)
{
	int sent, ok;

	ERR_IF(!h) return FALSE;
	ERR_IF(!size) return FALSE;

	h->magic   = BE_DRBD_MAGIC;
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size-sizeof(struct p_header80));

	sent = drbd_send(mdev, sock, h, size, msg_flags);

	ok = (sent == size);
	if (!ok)
		dev_err(DEV, "short sent %s size=%d sent=%d\n",
		    cmdname(cmd), (int)size, sent);
	return ok;
}
/* don't pass the socket. we may only look at it
 * when we hold the appropriate socket mutex.
 */
int drbd_send_cmd(struct drbd_conf *mdev, int use_data_socket,
		  enum drbd_packets cmd, struct p_header80 *h, size_t size)
{
	int ok = 0;
	struct socket *sock;

	if (use_data_socket) {
		mutex_lock(&mdev->data.mutex);
		sock = mdev->data.socket;
	} else {
		mutex_lock(&mdev->meta.mutex);
		sock = mdev->meta.socket;
	}

	/* drbd_disconnect() could have called drbd_free_sock()
	 * while we were waiting in down()... */
	if (likely(sock != NULL))
		ok = _drbd_send_cmd(mdev, sock, cmd, h, size, 0);

	if (use_data_socket)
		mutex_unlock(&mdev->data.mutex);
	else
		mutex_unlock(&mdev->meta.mutex);
	return ok;
}
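
/*
 * Packet framing used by the helpers above (sketch): every command starts
 * with a struct p_header80 carrying BE_DRBD_MAGIC, the 16-bit command code
 * and the payload length (size minus the header). A typical caller, e.g.
 * drbd_send_sr_reply() below, fills a packet struct whose leading member is
 * that header and hands it to drbd_send_cmd() on the data or meta socket.
 */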
int drbd_send_cmd2(struct drbd_conf *mdev, enum drbd_packets cmd, char *data,
		   size_t size)
{
	struct p_header80 h;
	int ok;

	h.magic   = BE_DRBD_MAGIC;
	h.command = cpu_to_be16(cmd);
	h.length  = cpu_to_be16(size);

	if (!drbd_get_data_sock(mdev))
		return 0;

	ok = (sizeof(h) ==
		drbd_send(mdev, mdev->data.socket, &h, sizeof(h), 0));
	ok = ok && (size ==
		drbd_send(mdev, mdev->data.socket, data, size, 0));

	drbd_put_data_sock(mdev);

	return ok;
}
int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc)
{
	struct p_rs_param_95 *p;
	struct socket *sock;
	int size, rv;
	const int apv = mdev->agreed_pro_version;

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(mdev->sync_conf.verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	/* used from admin command context and receiver/worker context.
	 * to avoid kmalloc, grab the socket right here,
	 * then use the pre-allocated sbuf there */
	mutex_lock(&mdev->data.mutex);
	sock = mdev->data.socket;

	if (likely(sock != NULL)) {
		enum drbd_packets cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

		p = &mdev->data.sbuf.rs_param_95;

		/* initialize verify_alg and csums_alg */
		memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

		p->rate = cpu_to_be32(sc->rate);
		p->c_plan_ahead = cpu_to_be32(sc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(sc->c_delay_target);
		p->c_fill_target = cpu_to_be32(sc->c_fill_target);
		p->c_max_rate = cpu_to_be32(sc->c_max_rate);

		if (apv >= 88)
			strcpy(p->verify_alg, mdev->sync_conf.verify_alg);
		if (apv >= 89)
			strcpy(p->csums_alg, mdev->sync_conf.csums_alg);

		rv = _drbd_send_cmd(mdev, sock, cmd, &p->head, size, 0);
	} else
		rv = 0; /* not ok */

	mutex_unlock(&mdev->data.mutex);

	return rv;
}
int drbd_send_protocol(struct drbd_conf *mdev)
{
	struct p_protocol *p;
	int size, cf, rv;

	size = sizeof(struct p_protocol);

	if (mdev->agreed_pro_version >= 87)
		size += strlen(mdev->net_conf->integrity_alg) + 1;

	/* we must not recurse into our own queue,
	 * as that is blocked during handshake */
	p = kmalloc(size, GFP_NOIO);
	if (p == NULL)
		return 0;

	p->protocol      = cpu_to_be32(mdev->net_conf->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(mdev->net_conf->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(mdev->net_conf->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(mdev->net_conf->after_sb_2p);
	p->two_primaries = cpu_to_be32(mdev->net_conf->two_primaries);

	cf = 0;
	if (mdev->net_conf->want_lose)
		cf |= CF_WANT_LOSE;
	if (mdev->net_conf->dry_run) {
		if (mdev->agreed_pro_version >= 92)
			cf |= CF_DRY_RUN;
		else {
			dev_err(DEV, "--dry-run is not supported by peer");
			kfree(p);
			return 0;
		}
	}
	p->conn_flags    = cpu_to_be32(cf);

	if (mdev->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, mdev->net_conf->integrity_alg);

	rv = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_PROTOCOL,
			   (struct p_header80 *)p, size);
	kfree(p);
	return rv;
}
int _drbd_send_uuids(struct drbd_conf *mdev, u64 uuid_flags)
{
	struct p_uuids p;
	int i;

	if (!get_ldev_if_state(mdev, D_NEGOTIATING))
		return 1;

	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p.uuid[i] = mdev->ldev ? cpu_to_be64(mdev->ldev->md.uuid[i]) : 0;

	mdev->comm_bm_set = drbd_bm_total_weight(mdev);
	p.uuid[UI_SIZE] = cpu_to_be64(mdev->comm_bm_set);
	uuid_flags |= mdev->net_conf->want_lose ? 1 : 0;
	uuid_flags |= test_bit(CRASHED_PRIMARY, &mdev->flags) ? 2 : 0;
	uuid_flags |= mdev->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p.uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(mdev);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_UUIDS,
			     (struct p_header80 *)&p, sizeof(p));
}

int drbd_send_uuids(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_conf *mdev)
{
	return _drbd_send_uuids(mdev, 8);
}


int drbd_send_sync_uuid(struct drbd_conf *mdev, u64 val)
{
	struct p_rs_uuid p;

	p.uuid = cpu_to_be64(val);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SYNC_UUID,
			     (struct p_header80 *)&p, sizeof(p));
}
int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags flags)
{
	struct p_sizes p;
	sector_t d_size, u_size;
	int q_order_type;
	int ok;

	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
		D_ASSERT(mdev->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(mdev->ldev);
		u_size = mdev->ldev->dc.disk_size;
		q_order_type = drbd_queue_order_type(mdev);
		put_ldev(mdev);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
	}

	p.d_size = cpu_to_be64(d_size);
	p.u_size = cpu_to_be64(u_size);
	p.c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(mdev->this_bdev));
	p.max_segment_size = cpu_to_be32(queue_max_segment_size(mdev->rq_queue));
	p.queue_order_type = cpu_to_be16(q_order_type);
	p.dds_flags = cpu_to_be16(flags);

	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_SIZES,
			   (struct p_header80 *)&p, sizeof(p));
	return ok;
}
/**
 * drbd_send_state() - Sends the drbd state to the peer
 * @mdev:	DRBD device.
 */
int drbd_send_state(struct drbd_conf *mdev)
{
	struct socket *sock;
	struct p_state p;
	int ok = 0;

	/* Grab state lock so we won't send state if we're in the middle
	 * of a cluster wide state change on another thread */
	drbd_state_lock(mdev);

	mutex_lock(&mdev->data.mutex);

	p.state = cpu_to_be32(mdev->state.i); /* Within the send mutex */
	sock = mdev->data.socket;

	if (likely(sock != NULL)) {
		ok = _drbd_send_cmd(mdev, sock, P_STATE,
				    (struct p_header80 *)&p, sizeof(p), 0);
	}

	mutex_unlock(&mdev->data.mutex);

	drbd_state_unlock(mdev);
	return ok;
}
int drbd_send_state_req(struct drbd_conf *mdev,
	union drbd_state mask, union drbd_state val)
{
	struct p_req_state p;

	p.mask    = cpu_to_be32(mask.i);
	p.val     = cpu_to_be32(val.i);

	return drbd_send_cmd(mdev, USE_DATA_SOCKET, P_STATE_CHG_REQ,
			     (struct p_header80 *)&p, sizeof(p));
}

int drbd_send_sr_reply(struct drbd_conf *mdev, int retcode)
{
	struct p_req_state_reply p;

	p.retcode    = cpu_to_be32(retcode);

	return drbd_send_cmd(mdev, USE_META_SOCKET, P_STATE_CHG_REPLY,
			     (struct p_header80 *)&p, sizeof(p));
}
int fill_bitmap_rle_bits(struct drbd_conf *mdev,
			 struct p_compressed_bm *p,
			 struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits;

	/* may we use this feature? */
	if ((mdev->sync_conf.use_rle == 0) ||
	    (mdev->agreed_pro_version < 90))
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most thus many bytes */
	bitstream_init(&bs, p->code, BM_PACKET_VLI_BYTES_MAX, 0);
	memset(p->code, 0, BM_PACKET_VLI_BYTES_MAX);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how much plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(mdev, c->bit_offset)
				    : _drbd_bm_find_next(mdev, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				DCBP_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			DCBP_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			dev_err(DEV, "unexpected zero runlength while encoding bitmap "
			    "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			dev_err(DEV, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	DCBP_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
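
/* Worked example (bit values invented for illustration): a stretch of
 * 8 clear bits, 4 set bits, 18 clear bits and 2 set bits starting at
 * c->bit_offset encodes as the run lengths 8, 4, 18, 2, with
 * DCBP_set_start(p, 0) because the first examined bit was clear.
 * Each run length is VLI-encoded into p->code; the result is only used
 * if it actually beats the plain bitmap representation, see the
 * plain_bits < (len << 3) check above. */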
enum { OK, FAILED, DONE }
send_bitmap_rle_or_plain(struct drbd_conf *mdev,
	struct p_header80 *h, struct bm_xfer_ctx *c)
{
	struct p_compressed_bm *p = (void*)h;
	unsigned long num_words;
	int len;
	int ok;

	len = fill_bitmap_rle_bits(mdev, p, c);

	if (len < 0)
		return FAILED;

	if (len) {
		DCBP_set_code(p, RLE_VLI_Bits);
		ok = _drbd_send_cmd(mdev, mdev->data.socket, P_COMPRESSED_BITMAP, h,
			sizeof(*p) + len, 0);

		c->packets[0]++;
		c->bytes[0] += sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		num_words = min_t(size_t, BM_PACKET_WORDS, c->bm_words - c->word_offset);
		len = num_words * sizeof(long);
		if (len)
			drbd_bm_get_lel(mdev, c->word_offset, num_words, (unsigned long*)h->payload);
		ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BITMAP,
				    h, sizeof(struct p_header80) + len, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->packets[1]++;
		c->bytes[1] += sizeof(struct p_header80) + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	ok = ok ? ((len == 0) ? DONE : OK) : FAILED;

	if (ok == DONE)
		INFO_bm_xfer_stats(mdev, "send", c);
	return ok;
}
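
/* The caller (_drbd_send_bitmap() below) simply loops on this function:
 * OK means "packet sent, more bitmap to go", DONE means the last packet
 * went out, FAILED aborts the transfer.  Whether a chunk goes out as
 * P_COMPRESSED_BITMAP or as plain P_BITMAP is decided per packet. */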
/* See the comment at receive_bitmap() */
int _drbd_send_bitmap(struct drbd_conf *mdev)
{
	struct bm_xfer_ctx c;
	struct p_header80 *p;
	int ret;

	ERR_IF(!mdev->bitmap) return FALSE;

	/* maybe we should use some per thread scratch page,
	 * and allocate that during initial device creation? */
	p = (struct p_header80 *) __get_free_page(GFP_NOIO);
	if (!p) {
		dev_err(DEV, "failed to allocate one page buffer in %s\n", __func__);
		return FALSE;
	}

	if (get_ldev(mdev)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC)) {
			dev_info(DEV, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(mdev);
			if (drbd_bm_write(mdev)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				dev_err(DEV, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
				drbd_md_sync(mdev);
			}
		}
		put_ldev(mdev);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(mdev),
		.bm_words = drbd_bm_words(mdev),
	};

	do {
		ret = send_bitmap_rle_or_plain(mdev, p, &c);
	} while (ret == OK);

	free_page((unsigned long) p);
	return (ret == DONE);
}

int drbd_send_bitmap(struct drbd_conf *mdev)
{
	int err;

	if (!drbd_get_data_sock(mdev))
		return -1;
	err = !_drbd_send_bitmap(mdev);
	drbd_put_data_sock(mdev);
	return err;
}
int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr, u32 set_size)
{
	int ok;
	struct p_barrier_ack p;

	p.barrier  = barrier_nr;
	p.set_size = cpu_to_be32(set_size);

	if (mdev->state.conn < C_CONNECTED)
		return FALSE;
	ok = drbd_send_cmd(mdev, USE_META_SOCKET, P_BARRIER_ACK,
			(struct p_header80 *)&p, sizeof(p));
	return ok;
}
/**
 * _drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packets cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	int ok;
	struct p_block_ack p;

	p.sector   = sector;
	p.block_id = block_id;
	p.blksize  = blksize;
	p.seq_num  = cpu_to_be32(atomic_add_return(1, &mdev->packet_seq));

	if (!mdev->meta.socket || mdev->state.conn < C_CONNECTED)
		return FALSE;
	ok = drbd_send_cmd(mdev, USE_META_SOCKET, cmd,
				(struct p_header80 *)&p, sizeof(p));
	return ok;
}

/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packets cmd,
		     struct p_data *dp, int data_size)
{
	data_size -= (mdev->agreed_pro_version >= 87 && mdev->integrity_r_tfm) ?
		crypto_hash_digestsize(mdev->integrity_r_tfm) : 0;
	return _drbd_send_ack(mdev, cmd, dp->sector, cpu_to_be32(data_size),
			      dp->block_id);
}

int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packets cmd,
		     struct p_block_req *rp)
{
	return _drbd_send_ack(mdev, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
 * drbd_send_ack() - Sends an ack packet
 * @mdev:	DRBD device.
 * @cmd:	Packet command code.
 */
int drbd_send_ack(struct drbd_conf *mdev,
	enum drbd_packets cmd, struct drbd_epoch_entry *e)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(e->sector),
			      cpu_to_be32(e->size),
			      e->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packets cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(mdev, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}

int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	int ok;
	struct p_block_req p;

	p.sector   = cpu_to_be64(sector);
	p.block_id = block_id;
	p.blksize  = cpu_to_be32(size);

	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, cmd,
				(struct p_header80 *)&p, sizeof(p));
	return ok;
}
int drbd_send_drequest_csum(struct drbd_conf *mdev,
			    sector_t sector, int size,
			    void *digest, int digest_size,
			    enum drbd_packets cmd)
{
	int ok;
	struct p_block_req p;

	p.sector   = cpu_to_be64(sector);
	p.block_id = BE_DRBD_MAGIC + 0xbeef;
	p.blksize  = cpu_to_be32(size);

	p.head.magic   = BE_DRBD_MAGIC;
	p.head.command = cpu_to_be16(cmd);
	p.head.length  = cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + digest_size);

	mutex_lock(&mdev->data.mutex);

	ok = (sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), 0));
	ok = ok && (digest_size == drbd_send(mdev, mdev->data.socket, digest, digest_size, 0));

	mutex_unlock(&mdev->data.mutex);

	return ok;
}

int drbd_send_ov_request(struct drbd_conf *mdev, sector_t sector, int size)
{
	int ok;
	struct p_block_req p;

	p.sector   = cpu_to_be64(sector);
	p.block_id = BE_DRBD_MAGIC + 0xbabe;
	p.blksize  = cpu_to_be32(size);

	ok = drbd_send_cmd(mdev, USE_DATA_SOCKET, P_OV_REQUEST,
			   (struct p_header80 *)&p, sizeof(p));
	return ok;
}
/* called on sndtimeo
 * returns FALSE if we should retry,
 * TRUE if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_conf *mdev, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - mdev->last_received); */

	drop_it =   mdev->meta.socket == sock
		|| !mdev->asender.task
		|| get_t_state(&mdev->asender) != Running
		|| mdev->state.conn < C_CONNECTED;

	if (drop_it)
		return TRUE;

	drop_it = !--mdev->ko_count;
	if (!drop_it) {
		dev_err(DEV, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
		       current->comm, current->pid, mdev->ko_count);
		request_ping(mdev);
	}

	return drop_it; /* && (mdev->state == R_PRIMARY) */;
}
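
/* Rough example of the ko_count ("knock out") policy above: with
 * net_conf->ko_count == 7, a peer that keeps timing out on sendmsg is
 * given 7 consecutive send timeouts before the connection is declared
 * dead; each new drbd_send() on the data socket resets mdev->ko_count
 * (see drbd_send() further below), restarting the countdown. */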
/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_conf *mdev, struct page *page,
		   int offset, size_t size, unsigned msg_flags)
{
	int sent = drbd_send(mdev, mdev->data.socket, kmap(page) + offset, size, msg_flags);
	kunmap(page);
	if (sent == size)
		mdev->send_cnt += size>>9;
	return sent == size;
}
static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
		    int offset, size_t size, unsigned msg_flags)
{
	mm_segment_t oldfs = get_fs();
	int sent, ok;
	int len = size;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(mdev, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(mdev);
	set_fs(KERNEL_DS);
	do {
		sent = mdev->data.socket->ops->sendpage(mdev->data.socket, page,
							offset, len,
							msg_flags);
		if (sent == -EAGAIN) {
			if (we_should_drop_the_connection(mdev,
							  mdev->data.socket))
				break;
			else
				continue;
		}
		if (sent <= 0) {
			dev_warn(DEV, "%s: size=%d len=%d sent=%d\n",
			     __func__, (int)size, len, sent);
			break;
		}
		len    -= sent;
		offset += sent;
	} while (len > 0 /* THINK && mdev->cstate >= C_CONNECTED*/);
	set_fs(oldfs);
	clear_bit(NET_CONGESTED, &mdev->flags);

	ok = (len == 0);
	if (likely(ok))
		mdev->send_cnt += size>>9;
	return ok;
}
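
/* Note on the two send paths above: _drbd_send_page() hands the page
 * reference to the network layer (zero copy, via ->sendpage), while
 * _drbd_no_send_page() copies the data through kmap() + drbd_send().
 * The copying fallback is what makes slab pages and pages with
 * page_count == 0 safe to transmit. */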
static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	/* hint all but last page with MSG_MORE */
	__bio_for_each_segment(bvec, bio, i, 0) {
		if (!_drbd_no_send_page(mdev, bvec->bv_page,
				     bvec->bv_offset, bvec->bv_len,
				     i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
			return 0;
	}
	return 1;
}

static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
{
	struct bio_vec *bvec;
	int i;
	/* hint all but last page with MSG_MORE */
	__bio_for_each_segment(bvec, bio, i, 0) {
		if (!_drbd_send_page(mdev, bvec->bv_page,
				     bvec->bv_offset, bvec->bv_len,
				     i == bio->bi_vcnt -1 ? 0 : MSG_MORE))
			return 0;
	}
	return 1;
}

static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
	struct page *page = e->pages;
	unsigned len = e->size;
	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);
		if (!_drbd_send_page(mdev, page, 0, l,
				page_chain_next(page) ? MSG_MORE : 0))
			return 0;
		len -= l;
	}
	return 1;
}
static u32 bio_flags_to_wire(struct drbd_conf *mdev, unsigned long bi_rw)
{
	if (mdev->agreed_pro_version >= 95)
		return  (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bi_rw & REQ_FUA ? DP_FUA : 0) |
			(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
			(bi_rw & REQ_DISCARD ? DP_DISCARD : 0);
	return bi_rw & REQ_SYNC ? DP_RW_SYNC : 0;
}
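
/* Example of the translation above (assuming a peer speaking protocol
 * 95 or newer): a bio with REQ_FLUSH | REQ_FUA set is sent with
 * DP_FLUSH | DP_FUA in the wire flags; against an older peer only the
 * REQ_SYNC -> DP_RW_SYNC hint survives. */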
/* Used to send write requests
 * R_PRIMARY -> Peer	(P_DATA)
 */
int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
{
	int ok = 1;
	struct p_data p;
	unsigned int dp_flags = 0;
	void *dgb;
	int dgs;

	if (!drbd_get_data_sock(mdev))
		return 0;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
		crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;

	if (req->size <= DRBD_MAX_SIZE_H80_PACKET) {
		p.head.h80.magic   = BE_DRBD_MAGIC;
		p.head.h80.command = cpu_to_be16(P_DATA);
		p.head.h80.length  =
			cpu_to_be16(sizeof(p) - sizeof(union p_header) + dgs + req->size);
	} else {
		p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
		p.head.h95.command = cpu_to_be16(P_DATA);
		p.head.h95.length  =
			cpu_to_be32(sizeof(p) - sizeof(union p_header) + dgs + req->size);
	}

	p.sector   = cpu_to_be64(req->sector);
	p.block_id = (unsigned long)req;
	p.seq_num  = cpu_to_be32(req->seq_num =
				 atomic_add_return(1, &mdev->packet_seq));

	dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);

	if (mdev->state.conn >= C_SYNC_SOURCE &&
	    mdev->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;

	p.dp_flags = cpu_to_be32(dp_flags);
	set_bit(UNPLUG_REMOTE, &mdev->flags);
	ok = (sizeof(p) ==
		drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0));
	if (ok && dgs) {
		dgb = mdev->int_dig_out;
		drbd_csum_bio(mdev, mdev->integrity_w_tfm, req->master_bio, dgb);
		ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
	}
	if (ok) {
		if (mdev->net_conf->wire_protocol == DRBD_PROT_A)
			ok = _drbd_send_bio(mdev, req->master_bio);
		else
			ok = _drbd_send_zc_bio(mdev, req->master_bio);
	}

	drbd_put_data_sock(mdev);

	return ok;
}
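
/* On the wire a P_DATA packet produced above is: the p_data header,
 * optionally followed by the integrity digest (e.g. 4 bytes for crc32c,
 * sent with MSG_MORE), followed by req->size payload bytes from the
 * master bio.  Protocol A uses the copying bio path so the bio may
 * complete before the data really left the box; see the sendpage
 * comment further up. */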
/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY   (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET          (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_conf *mdev, enum drbd_packets cmd,
		    struct drbd_epoch_entry *e)
{
	int ok;
	struct p_data p;
	void *dgb;
	int dgs;

	dgs = (mdev->agreed_pro_version >= 87 && mdev->integrity_w_tfm) ?
		crypto_hash_digestsize(mdev->integrity_w_tfm) : 0;

	if (e->size <= DRBD_MAX_SIZE_H80_PACKET) {
		p.head.h80.magic   = BE_DRBD_MAGIC;
		p.head.h80.command = cpu_to_be16(cmd);
		p.head.h80.length  =
			cpu_to_be16(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
	} else {
		p.head.h95.magic   = BE_DRBD_MAGIC_BIG;
		p.head.h95.command = cpu_to_be16(cmd);
		p.head.h95.length  =
			cpu_to_be32(sizeof(p) - sizeof(struct p_header80) + dgs + e->size);
	}

	p.sector   = cpu_to_be64(e->sector);
	p.block_id = e->block_id;
	/* p.seq_num  = 0;    No sequence numbers here.. */

	/* Only called by our kernel thread.
	 * This one may be interrupted by DRBD_SIG and/or DRBD_SIGKILL
	 * in response to admin command or module unload.
	 */
	if (!drbd_get_data_sock(mdev))
		return 0;

	ok = sizeof(p) == drbd_send(mdev, mdev->data.socket, &p, sizeof(p), dgs ? MSG_MORE : 0);
	if (ok && dgs) {
		dgb = mdev->int_dig_out;
		drbd_csum_ee(mdev, mdev->integrity_w_tfm, e, dgb);
		ok = drbd_send(mdev, mdev->data.socket, dgb, dgs, 0);
	}
	if (ok)
		ok = _drbd_send_zc_ee(mdev, e);

	drbd_put_data_sock(mdev);

	return ok;
}
/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_conf *mdev, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov;
	struct msghdr msg;
	int rv, sent = 0;

	if (!sock)
		return -1000;

	/* THINK  if (signal_pending) return ... ? */

	iov.iov_base = buf;
	iov.iov_len  = size;

	msg.msg_name       = NULL;
	msg.msg_namelen    = 0;
	msg.msg_control    = NULL;
	msg.msg_controllen = 0;
	msg.msg_flags      = msg_flags | MSG_NOSIGNAL;

	if (sock == mdev->data.socket) {
		mdev->ko_count = mdev->net_conf->ko_count;
		drbd_update_congested(mdev);
	}
	do {
		/* STRANGE
		 * tcp_sendmsg does _not_ use its size parameter at all ?
		 *
		 * -EAGAIN on timeout, -EINTR on signal.
		 */
/* THINK
 * do we need to block DRBD_SIG if sock == &meta.socket ??
 * otherwise wake_asender() might interrupt some send_*Ack !
 */
		rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(mdev, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
		iov.iov_base += rv;
		iov.iov_len  -= rv;
	} while (sent < size);

	if (sock == mdev->data.socket)
		clear_bit(NET_CONGESTED, &mdev->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			dev_err(DEV, "%s_sendmsg returned %d\n",
			    sock == mdev->meta.socket ? "msock" : "sock",
			    rv);
			drbd_force_state(mdev, NS(conn, C_BROKEN_PIPE));
		} else
			drbd_force_state(mdev, NS(conn, C_TIMEOUT));
	}

	return sent;
}
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
	struct drbd_conf *mdev = bdev->bd_disk->private_data;
	unsigned long flags;
	int rv = 0;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&mdev->req_lock, flags);
	/* to have a stable mdev->state.role
	 * and no race with updating open_cnt */

	if (mdev->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
			rv = -EROFS;
		else if (!allow_oos)
			rv = -EMEDIUMTYPE;
	}

	if (!rv)
		mdev->open_cnt++;
	spin_unlock_irqrestore(&mdev->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);

	return rv;
}

static int drbd_release(struct gendisk *gd, fmode_t mode)
{
	struct drbd_conf *mdev = gd->private_data;
	mutex_lock(&drbd_main_mutex);
	mdev->open_cnt--;
	mutex_unlock(&drbd_main_mutex);
	return 0;
}
static void drbd_set_defaults(struct drbd_conf *mdev)
{
	/* This way we get a compile error when sync_conf grows,
	   and we forgot to initialize it here */
	mdev->sync_conf = (struct syncer_conf) {
		/* .rate = */		DRBD_RATE_DEF,
		/* .after = */		DRBD_AFTER_DEF,
		/* .al_extents = */	DRBD_AL_EXTENTS_DEF,
		/* .verify_alg = */	{}, 0,
		/* .cpu_mask = */	{}, 0,
		/* .csums_alg = */	{}, 0,
		/* .use_rle = */	0,
		/* .on_no_data = */	DRBD_ON_NO_DATA_DEF,
		/* .c_plan_ahead = */	DRBD_C_PLAN_AHEAD_DEF,
		/* .c_delay_target = */ DRBD_C_DELAY_TARGET_DEF,
		/* .c_fill_target = */	DRBD_C_FILL_TARGET_DEF,
		/* .c_max_rate = */	DRBD_C_MAX_RATE_DEF,
		/* .c_min_rate = */	DRBD_C_MIN_RATE_DEF
	};

	/* Have to use that way, because the layout differs between
	   big endian and little endian */
	mdev->state = (union drbd_state) {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = C_STANDALONE,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };
}
void drbd_init_set_defaults(struct drbd_conf *mdev)
{
	/* the memset(,0,) did most of this.
	 * note: only assignments, no allocation in here */

	drbd_set_defaults(mdev);

	atomic_set(&mdev->ap_bio_cnt, 0);
	atomic_set(&mdev->ap_pending_cnt, 0);
	atomic_set(&mdev->rs_pending_cnt, 0);
	atomic_set(&mdev->unacked_cnt, 0);
	atomic_set(&mdev->local_cnt, 0);
	atomic_set(&mdev->net_cnt, 0);
	atomic_set(&mdev->packet_seq, 0);
	atomic_set(&mdev->pp_in_use, 0);
	atomic_set(&mdev->pp_in_use_by_net, 0);
	atomic_set(&mdev->rs_sect_in, 0);
	atomic_set(&mdev->rs_sect_ev, 0);

	mutex_init(&mdev->md_io_mutex);
	mutex_init(&mdev->data.mutex);
	mutex_init(&mdev->meta.mutex);
	sema_init(&mdev->data.work.s, 0);
	sema_init(&mdev->meta.work.s, 0);
	mutex_init(&mdev->state_mutex);

	spin_lock_init(&mdev->data.work.q_lock);
	spin_lock_init(&mdev->meta.work.q_lock);

	spin_lock_init(&mdev->al_lock);
	spin_lock_init(&mdev->req_lock);
	spin_lock_init(&mdev->peer_seq_lock);
	spin_lock_init(&mdev->epoch_lock);

	INIT_LIST_HEAD(&mdev->active_ee);
	INIT_LIST_HEAD(&mdev->sync_ee);
	INIT_LIST_HEAD(&mdev->done_ee);
	INIT_LIST_HEAD(&mdev->read_ee);
	INIT_LIST_HEAD(&mdev->net_ee);
	INIT_LIST_HEAD(&mdev->resync_reads);
	INIT_LIST_HEAD(&mdev->data.work.q);
	INIT_LIST_HEAD(&mdev->meta.work.q);
	INIT_LIST_HEAD(&mdev->resync_work.list);
	INIT_LIST_HEAD(&mdev->unplug_work.list);
	INIT_LIST_HEAD(&mdev->go_diskless.list);
	INIT_LIST_HEAD(&mdev->md_sync_work.list);
	INIT_LIST_HEAD(&mdev->bm_io_work.w.list);

	mdev->resync_work.cb  = w_resync_inactive;
	mdev->unplug_work.cb  = w_send_write_hint;
	mdev->go_diskless.cb  = w_go_diskless;
	mdev->md_sync_work.cb = w_md_sync;
	mdev->bm_io_work.w.cb = w_bitmap_io;
	init_timer(&mdev->resync_timer);
	init_timer(&mdev->md_sync_timer);
	mdev->resync_timer.function = resync_timer_fn;
	mdev->resync_timer.data = (unsigned long) mdev;
	mdev->md_sync_timer.function = md_sync_timer_fn;
	mdev->md_sync_timer.data = (unsigned long) mdev;

	init_waitqueue_head(&mdev->misc_wait);
	init_waitqueue_head(&mdev->state_wait);
	init_waitqueue_head(&mdev->net_cnt_wait);
	init_waitqueue_head(&mdev->ee_wait);
	init_waitqueue_head(&mdev->al_wait);
	init_waitqueue_head(&mdev->seq_wait);

	drbd_thread_init(mdev, &mdev->receiver, drbdd_init);
	drbd_thread_init(mdev, &mdev->worker, drbd_worker);
	drbd_thread_init(mdev, &mdev->asender, drbd_asender);

	mdev->agreed_pro_version = PRO_VERSION_MAX;
	mdev->write_ordering = WO_bdev_flush;
	mdev->resync_wenr = LC_FREE;
}
void drbd_mdev_cleanup(struct drbd_conf *mdev)
{
	int i;

	if (mdev->receiver.t_state != None)
		dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
				mdev->receiver.t_state);

	/* no need to lock it, I'm the only thread alive */
	if (atomic_read(&mdev->current_epoch->epoch_size) != 0)
		dev_err(DEV, "epoch_size:%d\n", atomic_read(&mdev->current_epoch->epoch_size));

	mdev->rs_failed = 0;
	mdev->rs_last_events = 0;
	mdev->rs_last_sect_ev = 0;
	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
		mdev->rs_mark_left[i] = 0;
		mdev->rs_mark_time[i] = 0;
	}
	D_ASSERT(mdev->net_conf == NULL);

	drbd_set_my_capacity(mdev, 0);
	if (mdev->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(mdev, 0, 1);
		drbd_bm_cleanup(mdev);
	}

	drbd_free_resources(mdev);
	clear_bit(AL_SUSPENDED, &mdev->flags);

	/*
	 * currently we drbd_init_ee only on module load, so
	 * we may do drbd_release_ee only on module unload!
	 */
	D_ASSERT(list_empty(&mdev->active_ee));
	D_ASSERT(list_empty(&mdev->sync_ee));
	D_ASSERT(list_empty(&mdev->done_ee));
	D_ASSERT(list_empty(&mdev->read_ee));
	D_ASSERT(list_empty(&mdev->net_ee));
	D_ASSERT(list_empty(&mdev->resync_reads));
	D_ASSERT(list_empty(&mdev->data.work.q));
	D_ASSERT(list_empty(&mdev->meta.work.q));
	D_ASSERT(list_empty(&mdev->resync_work.list));
	D_ASSERT(list_empty(&mdev->unplug_work.list));
	D_ASSERT(list_empty(&mdev->go_diskless.list));
}
static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(atomic_read(&drbd_pp_vacant)==0); */

	if (drbd_ee_mempool)
		mempool_destroy(drbd_ee_mempool);
	if (drbd_request_mempool)
		mempool_destroy(drbd_request_mempool);
	if (drbd_ee_cache)
		kmem_cache_destroy(drbd_ee_cache);
	if (drbd_request_cache)
		kmem_cache_destroy(drbd_request_cache);
	if (drbd_bm_ext_cache)
		kmem_cache_destroy(drbd_bm_ext_cache);
	if (drbd_al_ext_cache)
		kmem_cache_destroy(drbd_al_ext_cache);

	drbd_ee_mempool      = NULL;
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
}
static int drbd_create_mempools(void)
{
	struct page *page;
	const int number = (DRBD_MAX_SEGMENT_SIZE/PAGE_SIZE) * minor_count;
	int i;

	/* prepare our caches and mempools */
	drbd_request_mempool = NULL;
	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
	drbd_pp_pool         = NULL;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	drbd_request_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_request_cache);
	if (drbd_request_mempool == NULL)
		goto Enomem;

	drbd_ee_mempool = mempool_create(number,
		mempool_alloc_slab, mempool_free_slab, drbd_ee_cache);
	if (drbd_ee_mempool == NULL)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}
static int drbd_notify_sys(struct notifier_block *this, unsigned long code,
	void *unused)
{
	/* just so we have it.  you never know what interesting things we
	 * might want to do here some day...
	 */

	return NOTIFY_DONE;
}

static struct notifier_block drbd_notifier = {
	.notifier_call = drbd_notify_sys,
};

static void drbd_release_ee_lists(struct drbd_conf *mdev)
{
	int rr;

	rr = drbd_release_ee(mdev, &mdev->active_ee);
	if (rr)
		dev_err(DEV, "%d EEs in active list found!\n", rr);

	rr = drbd_release_ee(mdev, &mdev->sync_ee);
	if (rr)
		dev_err(DEV, "%d EEs in sync list found!\n", rr);

	rr = drbd_release_ee(mdev, &mdev->read_ee);
	if (rr)
		dev_err(DEV, "%d EEs in read list found!\n", rr);

	rr = drbd_release_ee(mdev, &mdev->done_ee);
	if (rr)
		dev_err(DEV, "%d EEs in done list found!\n", rr);

	rr = drbd_release_ee(mdev, &mdev->net_ee);
	if (rr)
		dev_err(DEV, "%d EEs in net list found!\n", rr);
}
/* caution. no locking.
 * currently only used from module cleanup code. */
static void drbd_delete_device(unsigned int minor)
{
	struct drbd_conf *mdev = minor_to_mdev(minor);

	if (!mdev)
		return;

	/* paranoia asserts */
	if (mdev->open_cnt != 0)
		dev_err(DEV, "open_cnt = %d in %s:%u", mdev->open_cnt,
				__FILE__ , __LINE__);

	ERR_IF (!list_empty(&mdev->data.work.q)) {
		struct list_head *lp;
		list_for_each(lp, &mdev->data.work.q) {
			dev_err(DEV, "lp = %p\n", lp);
		}
	};
	/* end paranoia asserts */

	del_gendisk(mdev->vdisk);

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (mdev->this_bdev)
		bdput(mdev->this_bdev);

	drbd_free_resources(mdev);

	drbd_release_ee_lists(mdev);

	/* should be free'd on disconnect? */
	kfree(mdev->ee_hash);
	/*
	mdev->ee_hash_s = 0;
	mdev->ee_hash = NULL;
	*/

	lc_destroy(mdev->act_log);
	lc_destroy(mdev->resync);

	kfree(mdev->p_uuid);
	/* mdev->p_uuid = NULL; */

	kfree(mdev->int_dig_out);
	kfree(mdev->int_dig_in);
	kfree(mdev->int_dig_vv);

	/* cleanup the rest that has been
	 * allocated from drbd_new_device
	 * and actually free the mdev itself */
	drbd_free_mdev(mdev);
}
static void drbd_cleanup(void)
{
	unsigned int i;

	unregister_reboot_notifier(&drbd_notifier);

	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (minor_table) {
		i = minor_count;
		while (i--)
			drbd_delete_device(i);
		drbd_destroy_mempools();
	}

	kfree(minor_table);

	unregister_blkdev(DRBD_MAJOR, "drbd");

	printk(KERN_INFO "drbd: module cleanup done.\n");
}
/**
 * drbd_congested() - Callback for pdflush
 * @congested_data:	User data
 * @bdi_bits:		Bits pdflush is currently interested in
 *
 * Returns 1<<BDI_async_congested and/or 1<<BDI_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_conf *mdev = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;

	if (!__inc_ap_bio_cond(mdev)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (get_ldev(mdev)) {
		q = bdev_get_queue(mdev->ldev->backing_bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
		put_ldev(mdev);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &mdev->flags)) {
		r |= (1 << BDI_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	mdev->congestion_reason = reason;
	return r;
}
struct drbd_conf *drbd_new_device(unsigned int minor)
{
	struct drbd_conf *mdev;
	struct gendisk *disk;
	struct request_queue *q;

	/* GFP_KERNEL, we are outside of all write-out paths */
	mdev = kzalloc(sizeof(struct drbd_conf), GFP_KERNEL);
	if (!mdev)
		return NULL;
	if (!zalloc_cpumask_var(&mdev->cpu_mask, GFP_KERNEL))
		goto out_no_cpumask;

	mdev->minor = minor;

	drbd_init_set_defaults(mdev);

	q = blk_alloc_queue(GFP_KERNEL);
	if (!q)
		goto out_no_q;
	mdev->rq_queue = q;
	q->queuedata   = mdev;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	mdev->vdisk = disk;

	set_disk_ro(disk, TRUE);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = mdev;

	mdev->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	mdev->this_bdev->bd_contains = mdev->this_bdev;

	q->backing_dev_info.congested_fn = drbd_congested;
	q->backing_dev_info.congested_data = mdev;

	blk_queue_make_request(q, drbd_make_request_26);
	blk_queue_max_segment_size(q, DRBD_MAX_SEGMENT_SIZE);
	blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(q, drbd_merge_bvec);
	q->queue_lock = &mdev->req_lock;

	mdev->md_io_page = alloc_page(GFP_KERNEL);
	if (!mdev->md_io_page)
		goto out_no_io_page;

	if (drbd_bm_init(mdev))
		goto out_no_bitmap;
	/* no need to lock access, we are still initializing this minor device. */
	if (!tl_init(mdev))
		goto out_no_tl;

	mdev->app_reads_hash = kzalloc(APP_R_HSIZE*sizeof(void *), GFP_KERNEL);
	if (!mdev->app_reads_hash)
		goto out_no_app_reads;

	mdev->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!mdev->current_epoch)
		goto out_no_epoch;

	INIT_LIST_HEAD(&mdev->current_epoch->list);

	return mdev;

/* out_whatever_else:
	kfree(mdev->current_epoch); */
out_no_epoch:
	kfree(mdev->app_reads_hash);
out_no_app_reads:
	tl_cleanup(mdev);
out_no_tl:
	drbd_bm_cleanup(mdev);
out_no_bitmap:
	__free_page(mdev->md_io_page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	free_cpumask_var(mdev->cpu_mask);
out_no_cpumask:
	kfree(mdev);
	return NULL;
}
/* counterpart of drbd_new_device.
 * last part of drbd_delete_device. */
void drbd_free_mdev(struct drbd_conf *mdev)
{
	kfree(mdev->current_epoch);
	kfree(mdev->app_reads_hash);
	tl_cleanup(mdev);
	if (mdev->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(mdev);
	__free_page(mdev->md_io_page);
	put_disk(mdev->vdisk);
	blk_cleanup_queue(mdev->rq_queue);
	free_cpumask_var(mdev->cpu_mask);
	kfree(mdev);
}
int __init drbd_init(void)
{
	int err;

	if (sizeof(struct p_handshake) != 80) {
		printk(KERN_ERR
		       "drbd: never change the size or layout "
		       "of the HandShake packet.\n");
		return -EINVAL;
	}

	if (1 > minor_count || minor_count > 255) {
		printk(KERN_ERR
			"drbd: invalid minor_count (%d)\n", minor_count);
		return -EINVAL;
	}

	err = drbd_nl_init();
	if (err)
		return err;

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		printk(KERN_ERR
		       "drbd: unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	register_reboot_notifier(&drbd_notifier);

	/*
	 * allocate all necessary structs
	 */
	err = -ENOMEM;

	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	minor_table = kzalloc(sizeof(struct drbd_conf *)*minor_count,
				GFP_KERNEL);
	if (!minor_table)
		goto Enomem;

	err = drbd_create_mempools();
	if (err)
		goto Enomem;

	drbd_proc = proc_create_data("drbd", S_IFREG | S_IRUGO , NULL, &drbd_proc_fops, NULL);
	if (!drbd_proc)	{
		printk(KERN_ERR "drbd: unable to register proc file\n");
		goto Enomem;
	}

	rwlock_init(&global_state_lock);

	printk(KERN_INFO "drbd: initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	printk(KERN_INFO "drbd: %s\n", drbd_buildtag());
	printk(KERN_INFO "drbd: registered as block device major %d\n",
		DRBD_MAJOR);
	printk(KERN_INFO "drbd: minor_table @ 0x%p\n", minor_table);

	return 0; /* Success! */

Enomem:
	drbd_cleanup();
	if (err == -ENOMEM)
		/* currently always the case */
		printk(KERN_ERR "drbd: ran out of memory\n");
	else
		printk(KERN_ERR "drbd: initialization failure\n");
	return err;
}
void drbd_free_bc(struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	blkdev_put(ldev->backing_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
	blkdev_put(ldev->md_bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

	kfree(ldev);
}

void drbd_free_sock(struct drbd_conf *mdev)
{
	if (mdev->data.socket) {
		mutex_lock(&mdev->data.mutex);
		kernel_sock_shutdown(mdev->data.socket, SHUT_RDWR);
		sock_release(mdev->data.socket);
		mdev->data.socket = NULL;
		mutex_unlock(&mdev->data.mutex);
	}
	if (mdev->meta.socket) {
		mutex_lock(&mdev->meta.mutex);
		kernel_sock_shutdown(mdev->meta.socket, SHUT_RDWR);
		sock_release(mdev->meta.socket);
		mdev->meta.socket = NULL;
		mutex_unlock(&mdev->meta.mutex);
	}
}

void drbd_free_resources(struct drbd_conf *mdev)
{
	crypto_free_hash(mdev->csums_tfm);
	mdev->csums_tfm = NULL;
	crypto_free_hash(mdev->verify_tfm);
	mdev->verify_tfm = NULL;
	crypto_free_hash(mdev->cram_hmac_tfm);
	mdev->cram_hmac_tfm = NULL;
	crypto_free_hash(mdev->integrity_w_tfm);
	mdev->integrity_w_tfm = NULL;
	crypto_free_hash(mdev->integrity_r_tfm);
	mdev->integrity_r_tfm = NULL;

	drbd_free_sock(mdev);

	__no_warn(local,
		  drbd_free_bc(mdev->ldev);
		  mdev->ldev = NULL;);
}
/* meta data management */

struct meta_data_on_disk {
	u64 la_size;           /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL */
	      /* `-- act_log->nr_elements <-- sync_conf.al_extents */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 reserved_u32[4];

} __packed;
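
/* The struct above is written as one full 512 byte block:
 * drbd_md_sync() below zeroes the buffer first, so everything not
 * covered by a field (plus the reserved_* members) stays zero on disk,
 * leaving room for future extensions without a layout change. */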
/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @mdev:	DRBD device.
 */
void drbd_md_sync(struct drbd_conf *mdev)
{
	struct meta_data_on_disk *buffer;
	sector_t sector;
	int i;

	del_timer(&mdev->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &mdev->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(mdev, D_FAILED))
		return;

	mutex_lock(&mdev->md_io_mutex);
	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);
	memset(buffer, 0, 512);

	buffer->la_size = cpu_to_be64(drbd_get_capacity(mdev->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(mdev->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(mdev->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC);

	buffer->md_size_sect  = cpu_to_be32(mdev->ldev->md.md_size_sect);
	buffer->al_offset     = cpu_to_be32(mdev->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(mdev->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(mdev->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(mdev->ldev->md.bm_offset);

	D_ASSERT(drbd_md_ss__(mdev, mdev->ldev) == mdev->ldev->md.md_offset);
	sector = mdev->ldev->md.md_offset;

	if (!drbd_md_sync_page_io(mdev, mdev->ldev, sector, WRITE)) {
		/* this was a try anyways ... */
		dev_err(DEV, "meta data update failed!\n");
		drbd_chk_io_error(mdev, 1, TRUE);
	}

	/* Update mdev->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	mdev->ldev->md.la_size_sect = drbd_get_capacity(mdev->this_bdev);

	mutex_unlock(&mdev->md_io_mutex);
	put_ldev(mdev);
}
/**
 * drbd_md_read() - Reads in the meta data super block
 * @mdev:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return 0 (NO_ERROR) on success, and an enum drbd_ret_codes in case
 * something goes wrong.  Currently only: ERR_IO_MD_DISK, ERR_MD_INVALID.
 */
int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	int i, rv = NO_ERROR;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		return ERR_IO_MD_DISK;

	mutex_lock(&mdev->md_io_mutex);
	buffer = (struct meta_data_on_disk *)page_address(mdev->md_io_page);

	if (!drbd_md_sync_page_io(mdev, bdev, bdev->md.md_offset, READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		dev_err(DEV, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	if (be32_to_cpu(buffer->magic) != DRBD_MD_MAGIC) {
		dev_err(DEV, "Error while reading metadata, magic not found.\n");
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->al_offset) != bdev->md.al_offset) {
		dev_err(DEV, "unexpected al_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->al_offset), bdev->md.al_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		dev_err(DEV, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		rv = ERR_MD_INVALID;
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		dev_err(DEV, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		rv = ERR_MD_INVALID;
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		dev_err(DEV, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		rv = ERR_MD_INVALID;
		goto err;
	}

	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	mdev->sync_conf.al_extents = be32_to_cpu(buffer->al_nr_extents);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	if (mdev->sync_conf.al_extents < 7)
		mdev->sync_conf.al_extents = 127;

 err:
	mutex_unlock(&mdev->md_io_mutex);
	put_ldev(mdev);

	return rv;
}
static void debug_drbd_uuid(struct drbd_conf *mdev, enum drbd_uuid_index index)
{
	static char *uuid_str[UI_EXTENDED_SIZE] = {
		[UI_CURRENT] = "CURRENT",
		[UI_BITMAP] = "BITMAP",
		[UI_HISTORY_START] = "HISTORY_START",
		[UI_HISTORY_END] = "HISTORY_END",
		[UI_SIZE] = "SIZE",
		[UI_FLAGS] = "FLAGS",
	};

	if (index >= UI_EXTENDED_SIZE) {
		dev_warn(DEV, " uuid_index >= EXTENDED_SIZE\n");
		return;
	}

	dynamic_dev_dbg(DEV, " uuid[%s] now %016llX\n",
		 uuid_str[index],
		 (unsigned long long)mdev->ldev->md.uuid[index]);
}
/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @mdev:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures that within five seconds you have to call drbd_md_sync().
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_conf *mdev, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags)) {
		mod_timer(&mdev->md_sync_timer, jiffies + HZ);
		mdev->last_md_mark_dirty.line = line;
		mdev->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_conf *mdev)
{
	if (!test_and_set_bit(MD_DIRTY, &mdev->flags))
		mod_timer(&mdev->md_sync_timer, jiffies + 5*HZ);
}
#endif
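
/* Typical usage: the UUID helpers below change mdev->ldev->md.uuid[]
 * and then call drbd_md_mark_dirty(mdev); at the latest about five
 * seconds later md_sync_timer_fn() queues w_md_sync(), whose
 * drbd_md_sync() call writes the super block out, unless some code
 * path called drbd_md_sync() earlier itself. */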
static void drbd_uuid_move_history(struct drbd_conf *mdev) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++) {
		mdev->ldev->md.uuid[i+1] = mdev->ldev->md.uuid[i];
		debug_drbd_uuid(mdev, i+1);
	}
}

void _drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (mdev->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(mdev, val);
	}

	mdev->ldev->md.uuid[idx] = val;
	debug_drbd_uuid(mdev, idx);
	drbd_md_mark_dirty(mdev);
}


void drbd_uuid_set(struct drbd_conf *mdev, int idx, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[idx];
		debug_drbd_uuid(mdev, UI_HISTORY_START);
	}
	_drbd_uuid_set(mdev, idx, val);
}

/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @mdev:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
{
	u64 val;

	dev_info(DEV, "Creating new current UUID\n");
	D_ASSERT(mdev->ldev->md.uuid[UI_BITMAP] == 0);
	mdev->ldev->md.uuid[UI_BITMAP] = mdev->ldev->md.uuid[UI_CURRENT];
	debug_drbd_uuid(mdev, UI_BITMAP);

	get_random_bytes(&val, sizeof(u64));
	_drbd_uuid_set(mdev, UI_CURRENT, val);
	/* get it to stable storage _now_ */
	drbd_md_sync(mdev);
}
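
/* Illustration of the rotation above: if this node is promoted while
 * the peer is gone, the old current UUID is parked in UI_BITMAP and a
 * fresh random value becomes UI_CURRENT.  When the peer comes back,
 * comparing its UUIDs against our bitmap/history UUIDs identifies the
 * common ancestor, so only the blocks marked in the bitmap since that
 * point need to be resynced. */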
void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)
{
	if (mdev->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	if (val == 0) {
		drbd_uuid_move_history(mdev);
		mdev->ldev->md.uuid[UI_HISTORY_START] = mdev->ldev->md.uuid[UI_BITMAP];
		mdev->ldev->md.uuid[UI_BITMAP] = 0;
		debug_drbd_uuid(mdev, UI_HISTORY_START);
		debug_drbd_uuid(mdev, UI_BITMAP);
	} else {
		if (mdev->ldev->md.uuid[UI_BITMAP])
			dev_warn(DEV, "bm UUID already set");

		mdev->ldev->md.uuid[UI_BITMAP] = val;
		mdev->ldev->md.uuid[UI_BITMAP] &= ~((u64)1);

		debug_drbd_uuid(mdev, UI_BITMAP);
	}
	drbd_md_mark_dirty(mdev);
}
/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_md_set_flag(mdev, MDF_FULL_SYNC);
		drbd_md_sync(mdev);
		drbd_bm_set_all(mdev);

		rv = drbd_bm_write(mdev);

		if (!rv) {
			drbd_md_clear_flag(mdev, MDF_FULL_SYNC);
			drbd_md_sync(mdev);
		}

		put_ldev(mdev);
	}

	return rv;
}

/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @mdev:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_conf *mdev)
{
	int rv = -EIO;

	drbd_resume_al(mdev);
	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		drbd_bm_clear_all(mdev);
		rv = drbd_bm_write(mdev);
		put_ldev(mdev);
	}

	return rv;
}
static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	struct bm_io_work *work = container_of(w, struct bm_io_work, w);
	int rv;

	D_ASSERT(atomic_read(&mdev->ap_bio_cnt) == 0);

	drbd_bm_lock(mdev, work->why);
	rv = work->io_fn(mdev);
	drbd_bm_unlock(mdev);

	clear_bit(BITMAP_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);

	if (work->done)
		work->done(mdev, rv);

	clear_bit(BITMAP_IO_QUEUED, &mdev->flags);
	work->why = NULL;

	return 1;
}
void drbd_ldev_destroy(struct drbd_conf *mdev)
{
	lc_destroy(mdev->resync);
	mdev->resync = NULL;
	lc_destroy(mdev->act_log);
	mdev->act_log = NULL;
	__no_warn(local,
		  drbd_free_bc(mdev->ldev);
		  mdev->ldev = NULL;);

	if (mdev->md_io_tmpp) {
		__free_page(mdev->md_io_tmpp);
		mdev->md_io_tmpp = NULL;
	}
	clear_bit(GO_DISKLESS, &mdev->flags);
}

static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	/* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
	 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
	 * the protected members anymore, though, so once put_ldev reaches zero
	 * again, it will be safe to free them. */
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	return 1;
}

void drbd_go_diskless(struct drbd_conf *mdev)
{
	D_ASSERT(mdev->state.disk == D_FAILED);
	if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
		drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
}
/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 *
 * While IO on the bitmap happens we freeze application IO thus we ensure
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 */
void drbd_queue_bitmap_io(struct drbd_conf *mdev,
			  int (*io_fn)(struct drbd_conf *),
			  void (*done)(struct drbd_conf *, int),
			  char *why)
{
	D_ASSERT(current == mdev->worker.task);

	D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &mdev->flags));
	D_ASSERT(!test_bit(BITMAP_IO, &mdev->flags));
	D_ASSERT(list_empty(&mdev->bm_io_work.w.list));
	if (mdev->bm_io_work.why)
		dev_err(DEV, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, mdev->bm_io_work.why);

	mdev->bm_io_work.io_fn = io_fn;
	mdev->bm_io_work.done = done;
	mdev->bm_io_work.why = why;

	set_bit(BITMAP_IO, &mdev->flags);
	if (atomic_read(&mdev->ap_bio_cnt) == 0) {
		if (list_empty(&mdev->bm_io_work.w.list)) {
			set_bit(BITMAP_IO_QUEUED, &mdev->flags);
			drbd_queue_work(&mdev->data.work, &mdev->bm_io_work.w);
		} else
			dev_err(DEV, "FIXME avoided double queuing bm_io_work\n");
	}
}
/**
 * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
 * @mdev:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why)
{
	int rv;

	D_ASSERT(current != mdev->worker.task);

	drbd_suspend_io(mdev);

	drbd_bm_lock(mdev, why);
	rv = io_fn(mdev);
	drbd_bm_unlock(mdev);

	drbd_resume_io(mdev);

	return rv;
}
void drbd_md_set_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_conf *mdev, int flag) __must_hold(local)
{
	if ((mdev->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(mdev);
		mdev->ldev->md.flags &= ~flag;
	}
}

int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}

static void md_sync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;

	drbd_queue_work_front(&mdev->data.work, &mdev->md_sync_work);
}

static int w_md_sync(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
	dev_warn(DEV, "md_sync_timer expired! Worker calls drbd_md_sync().\n");
#ifdef DEBUG
	dev_warn(DEV, "last md_mark_dirty: %s:%u\n",
		mdev->last_md_mark_dirty.func, mdev->last_md_mark_dirty.line);
#endif
	drbd_md_sync(mdev);
	return 1;
}
#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD 479001701  /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	unsigned long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}
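
/* Example of one generator step (for illustration only):
 * state' = state * 39916801 + 479001701, truncated to unsigned long;
 * swahw32() then swaps the two 16 bit halves of the low 32 bits so the
 * faster changing low bits end up high.  _drbd_insert_fault() below
 * reduces the result modulo 100 to compare against fault_rate. */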
static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}
unsigned int
_drbd_insert_fault(struct drbd_conf *mdev, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(fault_devs == 0 ||
			((1 << mdev_to_minor(mdev)) & fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= fault_rate));

	if (ret) {
		fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			dev_warn(DEV, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
#endif

const char *drbd_buildtag(void)
{
	/* DRBD built from external sources has here a reference to the
	   git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef CONFIG_MODULES
		if (THIS_MODULE != NULL)
			sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
		else
#endif
			buildtag[0] = 'b';
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)

EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);