/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"

struct after_state_chg_work {
	struct drbd_work w;
	struct drbd_device *device;
	union drbd_state os;
	union drbd_state ns;
	enum chg_state_flags flags;
	struct completion *done;
	struct drbd_state_change *state_change;
};

enum sanitize_state_warnings {
	NO_WARNING,
	ABORTED_ONLINE_VERIFY,
	ABORTED_RESYNC,
	CONNECTION_LOST_NEGOTIATING,
	IMPLICITLY_UPGRADED_DISK,
	IMPLICITLY_UPGRADED_PDSK,
};

static void count_objects(struct drbd_resource *resource,
			  unsigned int *n_devices,
			  unsigned int *n_connections)
{
	struct drbd_device *device;
	struct drbd_connection *connection;
	int vnr;

	*n_devices = 0;
	*n_connections = 0;

	idr_for_each_entry(&resource->devices, device, vnr)
		(*n_devices)++;
	for_each_connection(connection, resource)
		(*n_connections)++;
}

static struct drbd_state_change *alloc_state_change(unsigned int n_devices, unsigned int n_connections, gfp_t gfp)
{
	struct drbd_state_change *state_change;
	unsigned int size, n;

	size = sizeof(struct drbd_state_change) +
	       n_devices * sizeof(struct drbd_device_state_change) +
	       n_connections * sizeof(struct drbd_connection_state_change) +
	       n_devices * n_connections * sizeof(struct drbd_peer_device_state_change);
	state_change = kmalloc(size, gfp);
	if (!state_change)
		return NULL;
	state_change->n_devices = n_devices;
	state_change->n_connections = n_connections;
	state_change->devices = (void *)(state_change + 1);
	state_change->connections = (void *)&state_change->devices[n_devices];
	state_change->peer_devices = (void *)&state_change->connections[n_connections];
	state_change->resource->resource = NULL;
	for (n = 0; n < n_devices; n++)
		state_change->devices[n].device = NULL;
	for (n = 0; n < n_connections; n++)
		state_change->connections[n].connection = NULL;
	return state_change;
}

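/*
 * Illustrative sketch (comment only, not compiled): the single kmalloc()
 * above packs the header and the three variable-length arrays back to back:
 *
 *	struct drbd_state_change             header
 *	struct drbd_device_state_change      devices[n_devices]
 *	struct drbd_connection_state_change  connections[n_connections]
 *	struct drbd_peer_device_state_change peer_devices[n_devices * n_connections]
 *
 * so peer_devices[D * n_connections + C] pairs device D with connection C,
 * which matches the enumeration order used by remember_old_state() below.
 */
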
struct drbd_state_change *remember_old_state(struct drbd_resource *resource, gfp_t gfp)
{
	struct drbd_state_change *state_change;
	struct drbd_device *device;
	unsigned int n_devices;
	struct drbd_connection *connection;
	unsigned int n_connections;
	int vnr;

	struct drbd_device_state_change *device_state_change;
	struct drbd_peer_device_state_change *peer_device_state_change;
	struct drbd_connection_state_change *connection_state_change;

	/* Caller holds req_lock spinlock.
	 * No state, no device IDR, no connections lists can change. */
	count_objects(resource, &n_devices, &n_connections);
	state_change = alloc_state_change(n_devices, n_connections, gfp);
	if (!state_change)
		return NULL;

	kref_get(&resource->kref);
	state_change->resource->resource = resource;
	state_change->resource->role[OLD] =
		conn_highest_role(first_connection(resource));
	state_change->resource->susp[OLD] = resource->susp;
	state_change->resource->susp_nod[OLD] = resource->susp_nod;
	state_change->resource->susp_fen[OLD] = resource->susp_fen;

	connection_state_change = state_change->connections;
	for_each_connection(connection, resource) {
		kref_get(&connection->kref);
		connection_state_change->connection = connection;
		connection_state_change->cstate[OLD] =
			connection->cstate;
		connection_state_change->peer_role[OLD] =
			conn_highest_peer(connection);
		connection_state_change++;
	}

	device_state_change = state_change->devices;
	peer_device_state_change = state_change->peer_devices;
	idr_for_each_entry(&resource->devices, device, vnr) {
		kref_get(&device->kref);
		device_state_change->device = device;
		device_state_change->disk_state[OLD] = device->state.disk;

		/* The peer_devices for each device have to be enumerated in
		   the order of the connections. We may not use for_each_peer_device() here. */
		for_each_connection(connection, resource) {
			struct drbd_peer_device *peer_device;

			peer_device = conn_peer_device(connection, device->vnr);
			peer_device_state_change->peer_device = peer_device;
			peer_device_state_change->disk_state[OLD] =
				device->state.pdsk;
			peer_device_state_change->repl_state[OLD] =
				max_t(enum drbd_conns,
				      C_WF_REPORT_PARAMS, device->state.conn);
			peer_device_state_change->resync_susp_user[OLD] =
				device->state.user_isp;
			peer_device_state_change->resync_susp_peer[OLD] =
				device->state.peer_isp;
			peer_device_state_change->resync_susp_dependency[OLD] =
				device->state.aftr_isp;
			peer_device_state_change++;
		}
		device_state_change++;
	}

	return state_change;
}

static void remember_new_state(struct drbd_state_change *state_change)
{
	struct drbd_resource_state_change *resource_state_change;
	struct drbd_resource *resource;
	unsigned int n;

	if (!state_change)
		return;

	resource_state_change = &state_change->resource[0];
	resource = resource_state_change->resource;

	resource_state_change->role[NEW] =
		conn_highest_role(first_connection(resource));
	resource_state_change->susp[NEW] = resource->susp;
	resource_state_change->susp_nod[NEW] = resource->susp_nod;
	resource_state_change->susp_fen[NEW] = resource->susp_fen;

	for (n = 0; n < state_change->n_devices; n++) {
		struct drbd_device_state_change *device_state_change =
			&state_change->devices[n];
		struct drbd_device *device = device_state_change->device;

		device_state_change->disk_state[NEW] = device->state.disk;
	}

	for (n = 0; n < state_change->n_connections; n++) {
		struct drbd_connection_state_change *connection_state_change =
			&state_change->connections[n];
		struct drbd_connection *connection =
			connection_state_change->connection;

		connection_state_change->cstate[NEW] = connection->cstate;
		connection_state_change->peer_role[NEW] =
			conn_highest_peer(connection);
	}

	for (n = 0; n < state_change->n_devices * state_change->n_connections; n++) {
		struct drbd_peer_device_state_change *peer_device_state_change =
			&state_change->peer_devices[n];
		struct drbd_device *device =
			peer_device_state_change->peer_device->device;
		union drbd_dev_state state = device->state;

		peer_device_state_change->disk_state[NEW] = state.pdsk;
		peer_device_state_change->repl_state[NEW] =
			max_t(enum drbd_conns, C_WF_REPORT_PARAMS, state.conn);
		peer_device_state_change->resync_susp_user[NEW] =
			state.user_isp;
		peer_device_state_change->resync_susp_peer[NEW] =
			state.peer_isp;
		peer_device_state_change->resync_susp_dependency[NEW] =
			state.aftr_isp;
	}
}

void copy_old_to_new_state_change(struct drbd_state_change *state_change)
{
	struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
	unsigned int n_device, n_connection, n_peer_device, n_peer_devices;

#define OLD_TO_NEW(x) \
	(x[NEW] = x[OLD])

	OLD_TO_NEW(resource_state_change->role);
	OLD_TO_NEW(resource_state_change->susp);
	OLD_TO_NEW(resource_state_change->susp_nod);
	OLD_TO_NEW(resource_state_change->susp_fen);

	for (n_connection = 0; n_connection < state_change->n_connections; n_connection++) {
		struct drbd_connection_state_change *connection_state_change =
			&state_change->connections[n_connection];

		OLD_TO_NEW(connection_state_change->peer_role);
		OLD_TO_NEW(connection_state_change->cstate);
	}

	for (n_device = 0; n_device < state_change->n_devices; n_device++) {
		struct drbd_device_state_change *device_state_change =
			&state_change->devices[n_device];

		OLD_TO_NEW(device_state_change->disk_state);
	}

	n_peer_devices = state_change->n_devices * state_change->n_connections;
	for (n_peer_device = 0; n_peer_device < n_peer_devices; n_peer_device++) {
		struct drbd_peer_device_state_change *p =
			&state_change->peer_devices[n_peer_device];

		OLD_TO_NEW(p->disk_state);
		OLD_TO_NEW(p->repl_state);
		OLD_TO_NEW(p->resync_susp_user);
		OLD_TO_NEW(p->resync_susp_peer);
		OLD_TO_NEW(p->resync_susp_dependency);
	}

#undef OLD_TO_NEW
}

void forget_state_change(struct drbd_state_change *state_change)
{
	unsigned int n;

	if (!state_change)
		return;

	if (state_change->resource->resource)
		kref_put(&state_change->resource->resource->kref, drbd_destroy_resource);
	for (n = 0; n < state_change->n_devices; n++) {
		struct drbd_device *device = state_change->devices[n].device;

		if (device)
			kref_put(&device->kref, drbd_destroy_device);
	}
	for (n = 0; n < state_change->n_connections; n++) {
		struct drbd_connection *connection =
			state_change->connections[n].connection;

		if (connection)
			kref_put(&connection->kref, drbd_destroy_connection);
	}
	kfree(state_change);
}

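/*
 * Usage sketch for the snapshot trio above (this mirrors what
 * _drbd_set_state() does further down; the remember_* calls require
 * req_lock to be held):
 *
 *	state_change = remember_old_state(device->resource, GFP_ATOMIC);
 *	... apply the state transition ...
 *	remember_new_state(state_change);
 *	... later, from worker context ...
 *	broadcast_state_change(state_change);
 *	forget_state_change(state_change);
 *
 * remember_old_state() takes a kref on every object it records, so the
 * snapshot stays valid until forget_state_change() drops those refs.
 */
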
static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_device *device, union drbd_state os,
			   union drbd_state ns, enum chg_state_flags flags,
			   struct drbd_state_change *);
static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
				       union drbd_state ns, enum sanitize_state_warnings *warn);

static inline bool is_susp(union drbd_state s)
{
	return s.susp || s.susp_nod || s.susp_fen;
}

bool conn_all_vols_unconf(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = true;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.disk != D_DISKLESS ||
		    device->state.conn != C_STANDALONE ||
		    device->state.role != R_SECONDARY) {
			rv = false;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

/* Unfortunately the states were not ordered correctly when they were
   defined, therefore we cannot use max_t() here. */
static enum drbd_role max_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_PRIMARY || role2 == R_PRIMARY)
		return R_PRIMARY;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_UNKNOWN;
}

static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
{
	if (role1 == R_UNKNOWN || role2 == R_UNKNOWN)
		return R_UNKNOWN;
	if (role1 == R_SECONDARY || role2 == R_SECONDARY)
		return R_SECONDARY;
	return R_PRIMARY;
}

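/*
 * Example of why max_t() would give the wrong answer here: in enum drbd_role,
 * R_UNKNOWN is 0, R_PRIMARY is 1 and R_SECONDARY is 2, so a plain numeric
 * max_t(enum drbd_role, R_PRIMARY, R_SECONDARY) would yield R_SECONDARY,
 * while the "highest" role in the Primary > Secondary > Unknown sense is
 * R_PRIMARY.
 */
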
enum drbd_role conn_highest_role(struct drbd_connection *connection)
{
	enum drbd_role role = R_UNKNOWN;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		role = max_role(role, device->state.role);
	}
	rcu_read_unlock();

	return role;
}

enum drbd_role conn_highest_peer(struct drbd_connection *connection)
{
	enum drbd_role peer = R_UNKNOWN;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		peer = max_role(peer, device->state.peer);
	}
	rcu_read_unlock();

	return peer;
}

enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_DISKLESS;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = max_t(enum drbd_disk_state, disk_state, device->state.disk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_MASK;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = min_t(enum drbd_disk_state, disk_state, device->state.disk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
{
	enum drbd_disk_state disk_state = D_DISKLESS;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		disk_state = max_t(enum drbd_disk_state, disk_state, device->state.pdsk);
	}
	rcu_read_unlock();

	return disk_state;
}

enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
{
	enum drbd_conns conn = C_MASK;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		conn = min_t(enum drbd_conns, conn, device->state.conn);
	}
	rcu_read_unlock();

	return conn;
}

static bool no_peer_wf_report_params(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;
	bool rv = true;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		if (peer_device->device->state.conn == C_WF_REPORT_PARAMS) {
			rv = false;
			break;
		}
	rcu_read_unlock();

	return rv;
}

static void wake_up_all_devices(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
		wake_up(&peer_device->device->state_wait);
	rcu_read_unlock();
}

/**
 * cl_wide_st_chg() - true if the state change is a cluster wide one
 * @device:	DRBD device.
 * @os:		old (current) state.
 * @ns:		new (wanted) state.
 */
static int cl_wide_st_chg(struct drbd_device *device,
			  union drbd_state os, union drbd_state ns)
{
	return (os.conn >= C_CONNECTED && ns.conn >= C_CONNECTED &&
		 ((os.role != R_PRIMARY && ns.role == R_PRIMARY) ||
		  (os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
		  (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S) ||
		  (os.disk != D_FAILED && ns.disk == D_FAILED))) ||
		(os.conn >= C_CONNECTED && ns.conn == C_DISCONNECTING) ||
		(os.conn == C_CONNECTED && ns.conn == C_VERIFY_S) ||
		(os.conn == C_CONNECTED && ns.conn == C_WF_REPORT_PARAMS);
}

static union drbd_state
apply_mask_val(union drbd_state os, union drbd_state mask, union drbd_state val)
{
	union drbd_state ns;
	ns.i = (os.i & ~mask.i) | val.i;
	return ns;
}

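/*
 * Sketch of how mask/val pairs select a single field: to change only the
 * connection state, set every bit of .conn in the mask and the wanted value
 * in val, leaving all other fields zero.  (The NS() macro in drbd_int.h
 * builds exactly such pairs.)
 *
 *	union drbd_state mask = { { .conn = C_MASK } };
 *	union drbd_state val  = { { .conn = C_DISCONNECTING } };
 *	ns = apply_mask_val(os, mask, val);	(only ns.conn differs from os)
 */
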
enum drbd_state_rv
drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
		  union drbd_state mask, union drbd_state val)
{
	unsigned long flags;
	union drbd_state ns;
	enum drbd_state_rv rv;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	ns = apply_mask_val(drbd_read_state(device), mask, val);
	rv = _drbd_set_state(device, ns, f, NULL);
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	return rv;
}

/**
 * drbd_force_state() - Impose a change which happens outside our control on our state
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 */
void drbd_force_state(struct drbd_device *device,
	union drbd_state mask, union drbd_state val)
{
	drbd_change_state(device, CS_HARD, mask, val);
}

static enum drbd_state_rv
_req_st_cond(struct drbd_device *device, union drbd_state mask,
	     union drbd_state val)
{
	union drbd_state os, ns;
	unsigned long flags;
	enum drbd_state_rv rv;

	if (test_and_clear_bit(CL_ST_CHG_SUCCESS, &device->flags))
		return SS_CW_SUCCESS;

	if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
		return SS_CW_FAILED_BY_PEER;

	spin_lock_irqsave(&device->resource->req_lock, flags);
	os = drbd_read_state(device);
	ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv >= SS_SUCCESS)
		rv = SS_UNKNOWN_ERROR;  /* cont waiting, otherwise fail. */

	if (!cl_wide_st_chg(device, os, ns))
		rv = SS_CW_NO_NEED;
	if (rv == SS_UNKNOWN_ERROR) {
		rv = is_valid_state(device, ns);
		if (rv >= SS_SUCCESS) {
			rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
			if (rv >= SS_SUCCESS)
				rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
		}
	}
	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	return rv;
}

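/*
 * How the pieces fit together for a cluster wide change (sketch):
 * drbd_req_state() below sends P_STATE_CHG_REQ to the peer via
 * drbd_send_state_req() and then sleeps in
 * wait_event(device->state_wait, (rv = _req_st_cond(...))).  When the
 * peer's reply arrives, CL_ST_CHG_SUCCESS or CL_ST_CHG_FAIL is set in
 * device->flags elsewhere in the driver and state_wait is woken;
 * _req_st_cond() translates those bits into SS_CW_SUCCESS /
 * SS_CW_FAILED_BY_PEER, and returns SS_UNKNOWN_ERROR (0, "keep waiting")
 * while the request is still valid but unanswered.
 */
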
/**
 * drbd_req_state() - Perform an eventually cluster wide state change
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Should not be called directly, use drbd_request_state() or
 * _drbd_request_state().
 */
static enum drbd_state_rv
drbd_req_state(struct drbd_device *device, union drbd_state mask,
	       union drbd_state val, enum chg_state_flags f)
{
	struct completion done;
	unsigned long flags;
	union drbd_state os, ns;
	enum drbd_state_rv rv;

	init_completion(&done);

	if (f & CS_SERIALIZE)
		mutex_lock(device->state_mutex);

	spin_lock_irqsave(&device->resource->req_lock, flags);
	os = drbd_read_state(device);
	ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);
	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS) {
		spin_unlock_irqrestore(&device->resource->req_lock, flags);
		goto abort;
	}

	if (cl_wide_st_chg(device, os, ns)) {
		rv = is_valid_state(device, ns);
		if (rv == SS_SUCCESS)
			rv = is_valid_soft_transition(os, ns, first_peer_device(device)->connection);
		spin_unlock_irqrestore(&device->resource->req_lock, flags);

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}

		if (drbd_send_state_req(first_peer_device(device), mask, val)) {
			rv = SS_CW_FAILED_BY_PEER;
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}

		wait_event(device->state_wait,
			(rv = _req_st_cond(device, mask, val)));

		if (rv < SS_SUCCESS) {
			if (f & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			goto abort;
		}
		spin_lock_irqsave(&device->resource->req_lock, flags);
		ns = apply_mask_val(drbd_read_state(device), mask, val);
		rv = _drbd_set_state(device, ns, f, &done);
	} else {
		rv = _drbd_set_state(device, ns, f, &done);
	}

	spin_unlock_irqrestore(&device->resource->req_lock, flags);

	if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
		D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);
		wait_for_completion(&done);
	}

abort:
	if (f & CS_SERIALIZE)
		mutex_unlock(device->state_mutex);

	return rv;
}

/**
 * _drbd_request_state() - Request a state change (with flags)
 * @device:	DRBD device.
 * @mask:	mask of state bits to change.
 * @val:	value of new state bits.
 * @f:		flags
 *
 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
 * flag, or when logging of failed state change requests is not desired.
 */
enum drbd_state_rv
_drbd_request_state(struct drbd_device *device, union drbd_state mask,
		    union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	wait_event(device->state_wait,
		   (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE);

	return rv;
}

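/*
 * Typical call (sketch): ask for a graceful disconnect and retry for as
 * long as the cluster is in a transient state.  NS(T, S) from drbd_int.h
 * builds the mask/val pair for the single field T:
 *
 *	rv = _drbd_request_state(device, NS(conn, C_DISCONNECTING),
 *				 CS_VERBOSE | CS_ORDERED);
 */
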
enum drbd_state_rv
_drbd_request_state_holding_state_mutex(struct drbd_device *device, union drbd_state mask,
					union drbd_state val, enum chg_state_flags f)
{
	enum drbd_state_rv rv;

	BUG_ON(f & CS_SERIALIZE);

	wait_event_cmd(device->state_wait,
		       (rv = drbd_req_state(device, mask, val, f)) != SS_IN_TRANSIENT_STATE,
		       mutex_unlock(device->state_mutex),
		       mutex_lock(device->state_mutex));

	return rv;
}

static void print_st(struct drbd_device *device, const char *name, union drbd_state ns)
{
	drbd_err(device, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
	    name,
	    drbd_conn_str(ns.conn),
	    drbd_role_str(ns.role),
	    drbd_role_str(ns.peer),
	    drbd_disk_str(ns.disk),
	    drbd_disk_str(ns.pdsk),
	    is_susp(ns) ? 's' : 'r',
	    ns.aftr_isp ? 'a' : '-',
	    ns.peer_isp ? 'p' : '-',
	    ns.user_isp ? 'u' : '-',
	    ns.susp_fen ? 'F' : '-',
	    ns.susp_nod ? 'N' : '-'
	    );
}

void print_st_err(struct drbd_device *device, union drbd_state os,
	          union drbd_state ns, enum drbd_state_rv err)
{
	if (err == SS_IN_TRANSIENT_STATE)
		return;
	drbd_err(device, "State change failed: %s\n", drbd_set_st_err_str(err));
	print_st(device, " state", os);
	print_st(device, "wanted", ns);
}

static long print_state_change(char *pb, union drbd_state os, union drbd_state ns,
			       enum chg_state_flags flags)
{
	char *pbp;
	pbp = pb;
	*pbp = 0;

	if (ns.role != os.role && flags & CS_DC_ROLE)
		pbp += sprintf(pbp, "role( %s -> %s ) ",
			       drbd_role_str(os.role),
			       drbd_role_str(ns.role));
	if (ns.peer != os.peer && flags & CS_DC_PEER)
		pbp += sprintf(pbp, "peer( %s -> %s ) ",
			       drbd_role_str(os.peer),
			       drbd_role_str(ns.peer));
	if (ns.conn != os.conn && flags & CS_DC_CONN)
		pbp += sprintf(pbp, "conn( %s -> %s ) ",
			       drbd_conn_str(os.conn),
			       drbd_conn_str(ns.conn));
	if (ns.disk != os.disk && flags & CS_DC_DISK)
		pbp += sprintf(pbp, "disk( %s -> %s ) ",
			       drbd_disk_str(os.disk),
			       drbd_disk_str(ns.disk));
	if (ns.pdsk != os.pdsk && flags & CS_DC_PDSK)
		pbp += sprintf(pbp, "pdsk( %s -> %s ) ",
			       drbd_disk_str(os.pdsk),
			       drbd_disk_str(ns.pdsk));

	return pbp - pb;
}

static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];
	char *pbp = pb;

	pbp += print_state_change(pbp, os, ns, flags ^ CS_DC_MASK);

	if (ns.aftr_isp != os.aftr_isp)
		pbp += sprintf(pbp, "aftr_isp( %d -> %d ) ",
			       os.aftr_isp,
			       ns.aftr_isp);
	if (ns.peer_isp != os.peer_isp)
		pbp += sprintf(pbp, "peer_isp( %d -> %d ) ",
			       os.peer_isp,
			       ns.peer_isp);
	if (ns.user_isp != os.user_isp)
		pbp += sprintf(pbp, "user_isp( %d -> %d ) ",
			       os.user_isp,
			       ns.user_isp);

	if (pbp != pb)
		drbd_info(device, "%s\n", pb);
}

static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
				 enum chg_state_flags flags)
{
	char pb[300];
	char *pbp = pb;

	pbp += print_state_change(pbp, os, ns, flags);

	if (is_susp(ns) != is_susp(os) && flags & CS_DC_SUSP)
		pbp += sprintf(pbp, "susp( %d -> %d ) ",
			       is_susp(os),
			       is_susp(ns));

	if (pbp != pb)
		drbd_info(connection, "%s\n", pb);
}

/**
 * is_valid_state() - Returns an SS_ error code if ns is not valid
 * @device:	DRBD device.
 * @ns:		State to consider.
 */
static enum drbd_state_rv
is_valid_state(struct drbd_device *device, union drbd_state ns)
{
	/* See drbd_state_sw_errors in drbd_strings.c */

	enum drbd_fencing_p fp;
	enum drbd_state_rv rv = SS_SUCCESS;
	struct net_conf *nc;

	rcu_read_lock();
	fp = FP_DONT_CARE;
	if (get_ldev(device)) {
		fp = rcu_dereference(device->ldev->disk_conf)->fencing;
		put_ldev(device);
	}

	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	if (nc) {
		if (!nc->two_primaries && ns.role == R_PRIMARY) {
			if (ns.peer == R_PRIMARY)
				rv = SS_TWO_PRIMARIES;
			else if (conn_highest_peer(first_peer_device(device)->connection) == R_PRIMARY)
				rv = SS_O_VOL_PEER_PRI;
		}
	}

	if (rv <= 0)
		goto out; /* already found a reason to abort */
	else if (ns.role == R_SECONDARY && device->open_cnt)
		rv = SS_DEVICE_IN_USE;

	else if (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.disk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (fp >= FP_RESOURCE &&
		 ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk >= D_UNKNOWN)
		rv = SS_PRIMARY_NOP;

	else if (ns.role == R_PRIMARY && ns.disk <= D_INCONSISTENT && ns.pdsk <= D_INCONSISTENT)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_INCONSISTENT)
		rv = SS_NO_LOCAL_DISK;

	else if (ns.conn > C_CONNECTED && ns.pdsk < D_INCONSISTENT)
		rv = SS_NO_REMOTE_DISK;

	else if (ns.conn > C_CONNECTED && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_CONNECTED ||
		  ns.conn == C_WF_BITMAP_S ||
		  ns.conn == C_SYNC_SOURCE ||
		  ns.conn == C_PAUSED_SYNC_S) &&
		  ns.disk == D_OUTDATED)
		rv = SS_CONNECTED_OUTDATES;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		 (nc->verify_alg[0] == 0))
		rv = SS_NO_VERIFY_ALG;

	else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
		  first_peer_device(device)->connection->agreed_pro_version < 88)
		rv = SS_NOT_SUPPORTED;

	else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
		rv = SS_NO_UP_TO_DATE_DISK;

	else if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
		 ns.pdsk == D_UNKNOWN)
		rv = SS_NEED_CONNECTION;

	else if (ns.conn >= C_CONNECTED && ns.pdsk == D_UNKNOWN)
		rv = SS_CONNECTED_OUTDATES;

out:
	rcu_read_unlock();

	return rv;
}

/**
 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
 * This function limits state transitions that may be declined by DRBD. I.e.
 * user requests (aka soft transitions).
 * @device:	DRBD device.
 * @ns:		new state.
 * @os:		old state.
 */
static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection)
{
	enum drbd_state_rv rv = SS_SUCCESS;

	if ((ns.conn == C_STARTING_SYNC_T || ns.conn == C_STARTING_SYNC_S) &&
	    os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if (ns.conn == C_DISCONNECTING && os.conn == C_STANDALONE)
		rv = SS_ALREADY_STANDALONE;

	if (ns.disk > D_ATTACHING && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	if (ns.conn == C_WF_CONNECTION && os.conn < C_UNCONNECTED)
		rv = SS_NO_NET_CONFIG;

	if (ns.disk == D_OUTDATED && os.disk < D_OUTDATED && os.disk != D_ATTACHING)
		rv = SS_LOWER_THAN_OUTDATED;

	if (ns.conn == C_DISCONNECTING && os.conn == C_UNCONNECTED)
		rv = SS_IN_TRANSIENT_STATE;

	/* While establishing a connection only allow cstate to change.
	   Delay/refuse role changes, detach attach etc... (they do not touch cstate) */
	if (test_bit(STATE_SENT, &connection->flags) &&
	    !((ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION) ||
	      (ns.conn >= C_CONNECTED && os.conn == C_WF_REPORT_PARAMS)))
		rv = SS_IN_TRANSIENT_STATE;

	/* Do not promote during resync handshake triggered by "force primary".
	 * This is a hack. It should really be rejected by the peer during the
	 * cluster wide state change request. */
	if (os.role != R_PRIMARY && ns.role == R_PRIMARY
		&& ns.pdsk == D_UP_TO_DATE
		&& ns.disk != D_UP_TO_DATE && ns.disk != D_DISKLESS
		&& (ns.conn <= C_WF_SYNC_UUID || ns.conn != os.conn))
			rv = SS_IN_TRANSIENT_STATE;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
	    ns.conn != os.conn && os.conn > C_CONNECTED)
		rv = SS_RESYNC_RUNNING;

	if ((ns.conn == C_STARTING_SYNC_S || ns.conn == C_STARTING_SYNC_T) &&
	    os.conn < C_CONNECTED)
		rv = SS_NEED_CONNECTION;

	if ((ns.conn == C_SYNC_TARGET || ns.conn == C_SYNC_SOURCE)
	    && os.conn < C_WF_REPORT_PARAMS)
		rv = SS_NEED_CONNECTION; /* No NetworkFailure -> SyncTarget etc... */

	if (ns.conn == C_DISCONNECTING && ns.pdsk == D_OUTDATED &&
	    os.conn < C_CONNECTED && os.pdsk > D_OUTDATED)
		rv = SS_OUTDATE_WO_CONN;

	return rv;
}

static enum drbd_state_rv
is_valid_conn_transition(enum drbd_conns oc, enum drbd_conns nc)
{
	/* no change -> nothing to do, at least for the connection part */
	if (oc == nc)
		return SS_NOTHING_TO_DO;

	/* disconnect of an unconfigured connection does not make sense */
	if (oc == C_STANDALONE && nc == C_DISCONNECTING)
		return SS_ALREADY_STANDALONE;

	/* from C_STANDALONE, we start with C_UNCONNECTED */
	if (oc == C_STANDALONE && nc != C_UNCONNECTED)
		return SS_NEED_CONNECTION;

	/* When establishing a connection we need to go through WF_REPORT_PARAMS!
	   Necessary to do the right thing upon invalidate-remote on a disconnected resource */
	if (oc < C_WF_REPORT_PARAMS && nc >= C_CONNECTED)
		return SS_NEED_CONNECTION;

	/* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
	if (oc >= C_TIMEOUT && oc <= C_TEAR_DOWN && nc != C_UNCONNECTED && nc != C_DISCONNECTING)
		return SS_IN_TRANSIENT_STATE;

	/* After C_DISCONNECTING only C_STANDALONE may follow */
	if (oc == C_DISCONNECTING && nc != C_STANDALONE)
		return SS_IN_TRANSIENT_STATE;

	return SS_SUCCESS;
}

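/*
 * Example of the connection state sequence these checks enforce: a normal
 * connect walks C_STANDALONE -> C_UNCONNECTED -> C_WF_CONNECTION ->
 * C_WF_REPORT_PARAMS -> C_CONNECTED, and a teardown walks
 * C_CONNECTED -> C_DISCONNECTING -> C_STANDALONE.  Jumping straight from
 * C_STANDALONE to C_CONNECTED, or from C_DISCONNECTING to anything but
 * C_STANDALONE, is refused above.
 */
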
/**
 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
 * This limits hard state transitions. Hard state transitions are facts that are
 * imposed on DRBD by the environment. E.g. disk broke or network broke down.
 * But those hard state transitions are still not allowed to do everything.
 * @ns:		new state.
 * @os:		old state.
 */
static enum drbd_state_rv
is_valid_transition(union drbd_state os, union drbd_state ns)
{
	enum drbd_state_rv rv;

	rv = is_valid_conn_transition(os.conn, ns.conn);

	/* we cannot fail (again) if we already detached */
	if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
		rv = SS_IS_DISKLESS;

	return rv;
}

static void print_sanitize_warnings(struct drbd_device *device, enum sanitize_state_warnings warn)
{
	static const char *msg_table[] = {
		[NO_WARNING] = "",
		[ABORTED_ONLINE_VERIFY] = "Online-verify aborted.",
		[ABORTED_RESYNC] = "Resync aborted.",
		[CONNECTION_LOST_NEGOTIATING] = "Connection lost while negotiating, no data!",
		[IMPLICITLY_UPGRADED_DISK] = "Implicitly upgraded disk",
		[IMPLICITLY_UPGRADED_PDSK] = "Implicitly upgraded pdsk",
	};

	if (warn != NO_WARNING)
		drbd_warn(device, "%s\n", msg_table[warn]);
}

/**
 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
 * @device:	DRBD device.
 * @os:		old state.
 * @ns:		new state.
 * @warn:	placed here in case we detect something to warn about
 *
 * When we lose connection, we have to set the state of the peer's disk (pdsk)
 * to D_UNKNOWN. This rule and many more along those lines are in this function.
 */
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state os,
				       union drbd_state ns, enum sanitize_state_warnings *warn)
{
	enum drbd_fencing_p fp;
	enum drbd_disk_state disk_min, disk_max, pdsk_min, pdsk_max;

	if (warn)
		*warn = NO_WARNING;

	fp = FP_DONT_CARE;
	if (get_ldev(device)) {
		rcu_read_lock();
		fp = rcu_dereference(device->ldev->disk_conf)->fencing;
		rcu_read_unlock();
		put_ldev(device);
	}

	/* Implications from connection to peer and peer_isp */
	if (ns.conn < C_CONNECTED) {
		ns.peer_isp = 0;
		ns.peer = R_UNKNOWN;
		if (ns.pdsk > D_UNKNOWN || ns.pdsk < D_INCONSISTENT)
			ns.pdsk = D_UNKNOWN;
	}

	/* Clear the aftr_isp when becoming unconfigured */
	if (ns.conn == C_STANDALONE && ns.disk == D_DISKLESS && ns.role == R_SECONDARY)
		ns.aftr_isp = 0;

	/* An implication of the disk states onto the connection state */
	/* Abort resync if a disk fails/detaches */
	if (ns.conn > C_CONNECTED && (ns.disk <= D_FAILED || ns.pdsk <= D_FAILED)) {
		if (warn)
			*warn = ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T ?
				ABORTED_ONLINE_VERIFY : ABORTED_RESYNC;
		ns.conn = C_CONNECTED;
	}

	/* Connection breaks down before we finished "Negotiating" */
	if (ns.conn < C_CONNECTED && ns.disk == D_NEGOTIATING &&
	    get_ldev_if_state(device, D_NEGOTIATING)) {
		if (device->ed_uuid == device->ldev->md.uuid[UI_CURRENT]) {
			ns.disk = device->new_state_tmp.disk;
			ns.pdsk = device->new_state_tmp.pdsk;
		} else {
			if (warn)
				*warn = CONNECTION_LOST_NEGOTIATING;
			ns.disk = D_DISKLESS;
			ns.pdsk = D_UNKNOWN;
		}
		put_ldev(device);
	}

	/* D_CONSISTENT and D_OUTDATED vanish when we get connected */
	if (ns.conn >= C_CONNECTED && ns.conn < C_AHEAD) {
		if (ns.disk == D_CONSISTENT || ns.disk == D_OUTDATED)
			ns.disk = D_UP_TO_DATE;
		if (ns.pdsk == D_CONSISTENT || ns.pdsk == D_OUTDATED)
			ns.pdsk = D_UP_TO_DATE;
	}

	/* Implications of the connection state on the disk states */
	disk_min = D_DISKLESS;
	disk_max = D_UP_TO_DATE;
	pdsk_min = D_INCONSISTENT;
	pdsk_max = D_UNKNOWN;
	switch ((enum drbd_conns)ns.conn) {
	case C_WF_BITMAP_T:
	case C_PAUSED_SYNC_T:
	case C_STARTING_SYNC_T:
	case C_WF_SYNC_UUID:
	case C_BEHIND:
		disk_min = D_INCONSISTENT;
		disk_max = D_OUTDATED;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_VERIFY_S:
	case C_VERIFY_T:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_CONNECTED:
		disk_min = D_DISKLESS;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_DISKLESS;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_WF_BITMAP_S:
	case C_PAUSED_SYNC_S:
	case C_STARTING_SYNC_S:
	case C_AHEAD:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_CONSISTENT; /* D_OUTDATED would be nice. But explicit outdate necessary */
		break;
	case C_SYNC_TARGET:
		disk_min = D_INCONSISTENT;
		disk_max = D_INCONSISTENT;
		pdsk_min = D_UP_TO_DATE;
		pdsk_max = D_UP_TO_DATE;
		break;
	case C_SYNC_SOURCE:
		disk_min = D_UP_TO_DATE;
		disk_max = D_UP_TO_DATE;
		pdsk_min = D_INCONSISTENT;
		pdsk_max = D_INCONSISTENT;
		break;
	case C_STANDALONE:
	case C_DISCONNECTING:
	case C_UNCONNECTED:
	case C_TIMEOUT:
	case C_BROKEN_PIPE:
	case C_NETWORK_FAILURE:
	case C_PROTOCOL_ERROR:
	case C_TEAR_DOWN:
	case C_WF_CONNECTION:
	case C_WF_REPORT_PARAMS:
	case C_MASK:
		break;
	}
	if (ns.disk > disk_max)
		ns.disk = disk_max;

	if (ns.disk < disk_min) {
		if (warn)
			*warn = IMPLICITLY_UPGRADED_DISK;
		ns.disk = disk_min;
	}
	if (ns.pdsk > pdsk_max)
		ns.pdsk = pdsk_max;

	if (ns.pdsk < pdsk_min) {
		if (warn)
			*warn = IMPLICITLY_UPGRADED_PDSK;
		ns.pdsk = pdsk_min;
	}

	if (fp == FP_STONITH &&
	    (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED) &&
	    !(os.role == R_PRIMARY && os.conn < C_CONNECTED && os.pdsk > D_OUTDATED))
		ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */

	if (device->resource->res_opts.on_no_data == OND_SUSPEND_IO &&
	    (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) &&
	    !(os.role == R_PRIMARY && os.disk < D_UP_TO_DATE && os.pdsk < D_UP_TO_DATE))
		ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data) */

	if (ns.aftr_isp || ns.peer_isp || ns.user_isp) {
		if (ns.conn == C_SYNC_SOURCE)
			ns.conn = C_PAUSED_SYNC_S;
		if (ns.conn == C_SYNC_TARGET)
			ns.conn = C_PAUSED_SYNC_T;
	} else {
		if (ns.conn == C_PAUSED_SYNC_S)
			ns.conn = C_SYNC_SOURCE;
		if (ns.conn == C_PAUSED_SYNC_T)
			ns.conn = C_SYNC_TARGET;
	}

	return ns;
}

void drbd_resume_al(struct drbd_device *device)
{
	if (test_and_clear_bit(AL_SUSPENDED, &device->flags))
		drbd_info(device, "Resumed AL updates\n");
}

/* helper for _drbd_set_state */
static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
{
	if (first_peer_device(device)->connection->agreed_pro_version < 90)
		device->ov_start_sector = 0;
	device->rs_total = drbd_bm_bits(device);
	device->ov_position = 0;
	if (cs == C_VERIFY_T) {
		/* starting online verify from an arbitrary position
		 * does not fit well into the existing protocol.
		 * on C_VERIFY_T, we initialize ov_left and friends
		 * implicitly in receive_DataRequest once the
		 * first P_OV_REQUEST is received */
		device->ov_start_sector = ~(sector_t)0;
	} else {
		unsigned long bit = BM_SECT_TO_BIT(device->ov_start_sector);
		if (bit >= device->rs_total) {
			device->ov_start_sector =
				BM_BIT_TO_SECT(device->rs_total - 1);
			device->rs_total = 1;
		} else
			device->rs_total -= bit;
		device->ov_position = device->ov_start_sector;
	}
	device->ov_left = device->rs_total;
}

/**
 * _drbd_set_state() - Set a new DRBD state
 * @device:	DRBD device.
 * @ns:		new state.
 * @flags:	Flags
 * @done:	Optional completion, that will get completed after the after_state_ch() finished
 *
 * Caller needs to hold req_lock. Do not call directly.
 */
enum drbd_state_rv
_drbd_set_state(struct drbd_device *device, union drbd_state ns,
	        enum chg_state_flags flags, struct completion *done)
{
	struct drbd_peer_device *peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device ? peer_device->connection : NULL;
	union drbd_state os;
	enum drbd_state_rv rv = SS_SUCCESS;
	enum sanitize_state_warnings ssw;
	struct after_state_chg_work *ascw;
	struct drbd_state_change *state_change;

	os = drbd_read_state(device);

	ns = sanitize_state(device, os, ns, &ssw);
	if (ns.i == os.i)
		return SS_NOTHING_TO_DO;

	rv = is_valid_transition(os, ns);
	if (rv < SS_SUCCESS)
		return rv;

	if (!(flags & CS_HARD)) {
		/*  pre-state-change checks ; only look at ns  */
		/* See drbd_state_sw_errors in drbd_strings.c */

		rv = is_valid_state(device, ns);
		if (rv < SS_SUCCESS) {
			/* If the old state was illegal as well, then let
			   this happen...*/

			if (is_valid_state(device, os) == rv)
				rv = is_valid_soft_transition(os, ns, connection);
		} else
			rv = is_valid_soft_transition(os, ns, connection);
	}

	if (rv < SS_SUCCESS) {
		if (flags & CS_VERBOSE)
			print_st_err(device, os, ns, rv);
		return rv;
	}

	print_sanitize_warnings(device, ssw);

	drbd_pr_state_change(device, os, ns, flags);

	/* Display changes to the susp* flags that were caused by the call to
	   sanitize_state(). Only display it here if we were not called from
	   _conn_request_state() */
	if (!(flags & CS_DC_SUSP))
		conn_pr_state_change(connection, os, ns,
				     (flags & ~CS_DC_MASK) | CS_DC_SUSP);

	/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
	 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
	 * drbd_ldev_destroy() won't happen before our corresponding
	 * after_state_ch works run, where we put_ldev again. */
	if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
	    (os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
		atomic_inc(&device->local_cnt);

	if (!is_sync_state(os.conn) && is_sync_state(ns.conn))
		clear_bit(RS_DONE, &device->flags);

	/* FIXME: Have any flags been set earlier in this function already? */
	state_change = remember_old_state(device->resource, GFP_ATOMIC);

	/* changes to local_cnt and device flags should be visible before
	 * changes to state, which again should be visible before anything else
	 * depending on that change happens. */
	smp_wmb();
	device->state.i = ns.i;
	device->resource->susp = ns.susp;
	device->resource->susp_nod = ns.susp_nod;
	device->resource->susp_fen = ns.susp_fen;
	smp_wmb();

	remember_new_state(state_change);

	/* put replicated vs not-replicated requests in separate epochs */
	if (drbd_should_do_remote((union drbd_dev_state)os.i) !=
	    drbd_should_do_remote((union drbd_dev_state)ns.i))
		start_new_tl_epoch(connection);

	if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
		drbd_print_uuids(device, "attached to UUIDs");

	/* Wake up role changes, that were delayed because of connection establishing */
	if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
	    no_peer_wf_report_params(connection)) {
		clear_bit(STATE_SENT, &connection->flags);
		wake_up_all_devices(connection);
	}

	wake_up(&device->misc_wait);
	wake_up(&device->state_wait);
	wake_up(&connection->ping_wait);

	/* Aborted verify run, or we reached the stop sector.
	 * Log the last position, unless end-of-device. */
	if ((os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) &&
	    ns.conn <= C_CONNECTED) {
		device->ov_start_sector =
			BM_BIT_TO_SECT(drbd_bm_bits(device) - device->ov_left);
		if (device->ov_left)
			drbd_info(device, "Online Verify reached sector %llu\n",
				(unsigned long long)device->ov_start_sector);
	}

	if ((os.conn == C_PAUSED_SYNC_T || os.conn == C_PAUSED_SYNC_S) &&
	    (ns.conn == C_SYNC_TARGET  || ns.conn == C_SYNC_SOURCE)) {
		drbd_info(device, "Syncer continues.\n");
		device->rs_paused += (long)jiffies
				  -(long)device->rs_mark_time[device->rs_last_mark];
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&device->resync_timer, jiffies);
	}

	if ((os.conn == C_SYNC_TARGET  || os.conn == C_SYNC_SOURCE) &&
	    (ns.conn == C_PAUSED_SYNC_T || ns.conn == C_PAUSED_SYNC_S)) {
		drbd_info(device, "Resync suspended\n");
		device->rs_mark_time[device->rs_last_mark] = jiffies;
	}

	if (os.conn == C_CONNECTED &&
	    (ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T)) {
		unsigned long now = jiffies;
		int i;

		set_ov_position(device, ns.conn);
		device->rs_start = now;
		device->rs_last_sect_ev = 0;
		device->ov_last_oos_size = 0;
		device->ov_last_oos_start = 0;

		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			device->rs_mark_left[i] = device->ov_left;
			device->rs_mark_time[i] = now;
		}

		drbd_rs_controller_reset(device);

		if (ns.conn == C_VERIFY_S) {
			drbd_info(device, "Starting Online Verify from sector %llu\n",
					(unsigned long long)device->ov_position);
			mod_timer(&device->resync_timer, jiffies);
		}
	}

	if (get_ldev(device)) {
		u32 mdf = device->ldev->md.flags & ~(MDF_CONSISTENT|MDF_PRIMARY_IND|
						 MDF_CONNECTED_IND|MDF_WAS_UP_TO_DATE|
						 MDF_PEER_OUT_DATED|MDF_CRASHED_PRIMARY);

		mdf &= ~MDF_AL_CLEAN;
		if (test_bit(CRASHED_PRIMARY, &device->flags))
			mdf |= MDF_CRASHED_PRIMARY;
		if (device->state.role == R_PRIMARY ||
		    (device->state.pdsk < D_INCONSISTENT && device->state.peer == R_PRIMARY))
			mdf |= MDF_PRIMARY_IND;
		if (device->state.conn > C_WF_REPORT_PARAMS)
			mdf |= MDF_CONNECTED_IND;
		if (device->state.disk > D_INCONSISTENT)
			mdf |= MDF_CONSISTENT;
		if (device->state.disk > D_OUTDATED)
			mdf |= MDF_WAS_UP_TO_DATE;
		if (device->state.pdsk <= D_OUTDATED && device->state.pdsk >= D_INCONSISTENT)
			mdf |= MDF_PEER_OUT_DATED;
		if (mdf != device->ldev->md.flags) {
			device->ldev->md.flags = mdf;
			drbd_md_mark_dirty(device);
		}
		if (os.disk < D_CONSISTENT && ns.disk >= D_CONSISTENT)
			drbd_set_ed_uuid(device, device->ldev->md.uuid[UI_CURRENT]);
		put_ldev(device);
	}

	/* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
	if (os.disk == D_INCONSISTENT && os.pdsk == D_INCONSISTENT &&
	    os.peer == R_SECONDARY && ns.peer == R_PRIMARY)
		set_bit(CONSIDER_RESYNC, &device->flags);

	/* Receiver should clean up itself */
	if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
		drbd_thread_stop_nowait(&connection->receiver);

	/* Now the receiver finished cleaning up itself, it should die */
	if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
		drbd_thread_stop_nowait(&connection->receiver);

	/* Upon network failure, we need to restart the receiver. */
	if (os.conn > C_WF_CONNECTION &&
	    ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
		drbd_thread_restart_nowait(&connection->receiver);

	/* Resume AL writing if we get a connection */
	if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
		drbd_resume_al(device);
		connection->connect_cnt++;
	}

	/* remember last attach time so request_timer_fn() won't
	 * kill newly established sessions while we are still trying to thaw
	 * previously frozen IO */
	if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
	    ns.disk > D_NEGOTIATING)
		device->last_reattach_jif = jiffies;

	ascw = kmalloc(sizeof(*ascw), GFP_ATOMIC);
	if (ascw) {
		ascw->os = os;
		ascw->ns = ns;
		ascw->flags = flags;
		ascw->w.cb = w_after_state_ch;
		ascw->device = device;
		ascw->done = done;
		ascw->state_change = state_change;
		drbd_queue_work(&connection->sender_work,
				&ascw->w);
	} else {
		drbd_err(device, "Could not kmalloc an ascw\n");
	}

	return rv;
}

static int w_after_state_ch(struct drbd_work *w, int unused)
{
	struct after_state_chg_work *ascw =
		container_of(w, struct after_state_chg_work, w);
	struct drbd_device *device = ascw->device;

	after_state_ch(device, ascw->os, ascw->ns, ascw->flags, ascw->state_change);
	forget_state_change(ascw->state_change);
	if (ascw->flags & CS_WAIT_COMPLETE)
		complete(ascw->done);
	kfree(ascw);

	return 0;
}

static void abw_start_sync(struct drbd_device *device, int rv)
{
	if (rv) {
		drbd_err(device, "Writing the bitmap failed, not starting resync.\n");
		_drbd_request_state(device, NS(conn, C_CONNECTED), CS_VERBOSE);
		return;
	}

	switch (device->state.conn) {
	case C_STARTING_SYNC_T:
		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE);
		break;
	case C_STARTING_SYNC_S:
		drbd_start_resync(device, C_SYNC_SOURCE);
		break;
	}
}

int drbd_bitmap_io_from_worker(struct drbd_device *device,
		int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags)
{
	int rv;

	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);

	/* open coded non-blocking drbd_suspend_io(device); */
	atomic_inc(&device->suspend_cnt);

	drbd_bm_lock(device, why, flags);
	rv = io_fn(device);
	drbd_bm_unlock(device);

	drbd_resume_io(device);

	return rv;
}

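/*
 * Usage sketch, as in after_state_ch() below: write out the whole bitmap
 * on demote, with the bitmap locked against concurrent changes:
 *
 *	drbd_bitmap_io_from_worker(device, &drbd_bm_write,
 *				   "demote", BM_LOCKED_TEST_ALLOWED);
 */
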
void notify_resource_state_change(struct sk_buff *skb,
				  unsigned int seq,
				  struct drbd_resource_state_change *resource_state_change,
				  enum drbd_notification_type type)
{
	struct drbd_resource *resource = resource_state_change->resource;
	struct resource_info resource_info = {
		.res_role = resource_state_change->role[NEW],
		.res_susp = resource_state_change->susp[NEW],
		.res_susp_nod = resource_state_change->susp_nod[NEW],
		.res_susp_fen = resource_state_change->susp_fen[NEW],
	};

	notify_resource_state(skb, seq, resource, &resource_info, type);
}

void notify_connection_state_change(struct sk_buff *skb,
				    unsigned int seq,
				    struct drbd_connection_state_change *connection_state_change,
				    enum drbd_notification_type type)
{
	struct drbd_connection *connection = connection_state_change->connection;
	struct connection_info connection_info = {
		.conn_connection_state = connection_state_change->cstate[NEW],
		.conn_role = connection_state_change->peer_role[NEW],
	};

	notify_connection_state(skb, seq, connection, &connection_info, type);
}

void notify_device_state_change(struct sk_buff *skb,
				unsigned int seq,
				struct drbd_device_state_change *device_state_change,
				enum drbd_notification_type type)
{
	struct drbd_device *device = device_state_change->device;
	struct device_info device_info = {
		.dev_disk_state = device_state_change->disk_state[NEW],
	};

	notify_device_state(skb, seq, device, &device_info, type);
}

void notify_peer_device_state_change(struct sk_buff *skb,
				     unsigned int seq,
				     struct drbd_peer_device_state_change *p,
				     enum drbd_notification_type type)
{
	struct drbd_peer_device *peer_device = p->peer_device;
	struct peer_device_info peer_device_info = {
		.peer_repl_state = p->repl_state[NEW],
		.peer_disk_state = p->disk_state[NEW],
		.peer_resync_susp_user = p->resync_susp_user[NEW],
		.peer_resync_susp_peer = p->resync_susp_peer[NEW],
		.peer_resync_susp_dependency = p->resync_susp_dependency[NEW],
	};

	notify_peer_device_state(skb, seq, peer_device, &peer_device_info, type);
}

static void broadcast_state_change(struct drbd_state_change *state_change)
{
	struct drbd_resource_state_change *resource_state_change = &state_change->resource[0];
	bool resource_state_has_changed;
	unsigned int n_device, n_connection, n_peer_device, n_peer_devices;
	void (*last_func)(struct sk_buff *, unsigned int, void *,
			  enum drbd_notification_type) = NULL;
	void *uninitialized_var(last_arg);

#define HAS_CHANGED(state) ((state)[OLD] != (state)[NEW])
#define FINAL_STATE_CHANGE(type) \
	({ if (last_func) \
		last_func(NULL, 0, last_arg, type); \
	})
#define REMEMBER_STATE_CHANGE(func, arg, type) \
	({ FINAL_STATE_CHANGE(type | NOTIFY_CONTINUES); \
	   last_func = (typeof(last_func))func; \
	   last_arg = arg; \
	 })

	mutex_lock(&notification_mutex);

	resource_state_has_changed =
	    HAS_CHANGED(resource_state_change->role) ||
	    HAS_CHANGED(resource_state_change->susp) ||
	    HAS_CHANGED(resource_state_change->susp_nod) ||
	    HAS_CHANGED(resource_state_change->susp_fen);

	if (resource_state_has_changed)
		REMEMBER_STATE_CHANGE(notify_resource_state_change,
				      resource_state_change, NOTIFY_CHANGE);

	for (n_connection = 0; n_connection < state_change->n_connections; n_connection++) {
		struct drbd_connection_state_change *connection_state_change =
				&state_change->connections[n_connection];

		if (HAS_CHANGED(connection_state_change->peer_role) ||
		    HAS_CHANGED(connection_state_change->cstate))
			REMEMBER_STATE_CHANGE(notify_connection_state_change,
					      connection_state_change, NOTIFY_CHANGE);
	}

	for (n_device = 0; n_device < state_change->n_devices; n_device++) {
		struct drbd_device_state_change *device_state_change =
			&state_change->devices[n_device];

		if (HAS_CHANGED(device_state_change->disk_state))
			REMEMBER_STATE_CHANGE(notify_device_state_change,
					      device_state_change, NOTIFY_CHANGE);
	}

	n_peer_devices = state_change->n_devices * state_change->n_connections;
	for (n_peer_device = 0; n_peer_device < n_peer_devices; n_peer_device++) {
		struct drbd_peer_device_state_change *p =
			&state_change->peer_devices[n_peer_device];

		if (HAS_CHANGED(p->disk_state) ||
		    HAS_CHANGED(p->repl_state) ||
		    HAS_CHANGED(p->resync_susp_user) ||
		    HAS_CHANGED(p->resync_susp_peer) ||
		    HAS_CHANGED(p->resync_susp_dependency))
			REMEMBER_STATE_CHANGE(notify_peer_device_state_change,
					      p, NOTIFY_CHANGE);
	}

	FINAL_STATE_CHANGE(NOTIFY_CHANGE);
	mutex_unlock(&notification_mutex);

#undef HAS_CHANGED
#undef FINAL_STATE_CHANGE
#undef REMEMBER_STATE_CHANGE
}

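/*
 * Sketch of what the macro pair above achieves: REMEMBER_STATE_CHANGE()
 * does not emit its argument immediately; it first flushes the previously
 * remembered notification with NOTIFY_CONTINUES or'ed into its type, then
 * remembers the new one.  The trailing FINAL_STATE_CHANGE(NOTIFY_CHANGE)
 * emits the last remembered notification without that flag, so userspace
 * can tell where a multi-object state change ends.
 */
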
/* takes old and new peer disk state */
static bool lost_contact_to_peer_data(enum drbd_disk_state os, enum drbd_disk_state ns)
{
	if ((os >= D_INCONSISTENT && os != D_UNKNOWN && os != D_OUTDATED)
	&&  (ns < D_INCONSISTENT || ns == D_UNKNOWN || ns == D_OUTDATED))
		return true;

	/* Scenario, starting with normal operation
	 * Connected Primary/Secondary UpToDate/UpToDate
	 * NetworkFailure Primary/Unknown UpToDate/DUnknown (frozen)
	 * ...
	 * Connected Primary/Secondary UpToDate/Diskless (resumed; needs to bump uuid!)
	 */
	if (os == D_UNKNOWN
	&&  (ns == D_DISKLESS || ns == D_FAILED || ns == D_OUTDATED))
		return true;

	return false;
}

1662 * after_state_ch() - Perform after state change actions that may sleep
1663 * @device: DRBD device.
1668 static void after_state_ch(struct drbd_device
*device
, union drbd_state os
,
1669 union drbd_state ns
, enum chg_state_flags flags
,
1670 struct drbd_state_change
*state_change
)
1672 struct drbd_resource
*resource
= device
->resource
;
1673 struct drbd_peer_device
*peer_device
= first_peer_device(device
);
1674 struct drbd_connection
*connection
= peer_device
? peer_device
->connection
: NULL
;
1675 struct sib_info sib
;
1677 broadcast_state_change(state_change
);
1679 sib
.sib_reason
= SIB_STATE_CHANGE
;
1683 if ((os
.disk
!= D_UP_TO_DATE
|| os
.pdsk
!= D_UP_TO_DATE
)
1684 && (ns
.disk
== D_UP_TO_DATE
&& ns
.pdsk
== D_UP_TO_DATE
)) {
1685 clear_bit(CRASHED_PRIMARY
, &device
->flags
);
1687 device
->p_uuid
[UI_FLAGS
] &= ~((u64
)2);
1690 /* Inform userspace about the change... */
1691 drbd_bcast_event(device
, &sib
);
1693 if (!(os
.role
== R_PRIMARY
&& os
.disk
< D_UP_TO_DATE
&& os
.pdsk
< D_UP_TO_DATE
) &&
1694 (ns
.role
== R_PRIMARY
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
))
1695 drbd_khelper(device
, "pri-on-incon-degr");
1697 /* Here we have the actions that are performed after a
1698 state change. This function might sleep */
1701 enum drbd_req_event what
= NOTHING
;
1703 spin_lock_irq(&device
->resource
->req_lock
);
1704 if (os
.conn
< C_CONNECTED
&& conn_lowest_conn(connection
) >= C_CONNECTED
)
1707 if ((os
.disk
== D_ATTACHING
|| os
.disk
== D_NEGOTIATING
) &&
1708 conn_lowest_disk(connection
) == D_UP_TO_DATE
)
1709 what
= RESTART_FROZEN_DISK_IO
;
1711 if (resource
->susp_nod
&& what
!= NOTHING
) {
1712 _tl_restart(connection
, what
);
1713 _conn_request_state(connection
,
1714 (union drbd_state
) { { .susp_nod
= 1 } },
1715 (union drbd_state
) { { .susp_nod
= 0 } },
1718 spin_unlock_irq(&device
->resource
->req_lock
);
1722 spin_lock_irq(&device
->resource
->req_lock
);
1723 if (resource
->susp_fen
&& conn_lowest_conn(connection
) >= C_CONNECTED
) {
1724 /* case2: The connection was established again: */
1725 struct drbd_peer_device
*peer_device
;
1729 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
)
1730 clear_bit(NEW_CUR_UUID
, &peer_device
->device
->flags
);
1733 /* We should actively create a new uuid, _before_
1734 * we resume/resent, if the peer is diskless
1735 * (recovery from a multiple error scenario).
1736 * Currently, this happens with a slight delay
1737 * below when checking lost_contact_to_peer_data() ...
1739 _tl_restart(connection
, RESEND
);
1740 _conn_request_state(connection
,
1741 (union drbd_state
) { { .susp_fen
= 1 } },
1742 (union drbd_state
) { { .susp_fen
= 0 } },
1745 spin_unlock_irq(&device
->resource
->req_lock
);
1748 /* Became sync source. With protocol >= 96, we still need to send out
1749 * the sync uuid now. Need to do that before any drbd_send_state, or
1750 * the other side may go "paused sync" before receiving the sync uuids,
1751 * which is unexpected. */
1752 if ((os
.conn
!= C_SYNC_SOURCE
&& os
.conn
!= C_PAUSED_SYNC_S
) &&
1753 (ns
.conn
== C_SYNC_SOURCE
|| ns
.conn
== C_PAUSED_SYNC_S
) &&
1754 connection
->agreed_pro_version
>= 96 && get_ldev(device
)) {
1755 drbd_gen_and_send_sync_uuid(peer_device
);
1759 /* Do not change the order of the if above and the two below... */
1760 if (os
.pdsk
== D_DISKLESS
&&
1761 ns
.pdsk
> D_DISKLESS
&& ns
.pdsk
!= D_UNKNOWN
) { /* attach on the peer */
1762 /* we probably will start a resync soon.
1763 * make sure those things are properly reset. */
1764 device
->rs_total
= 0;
1765 device
->rs_failed
= 0;
1766 atomic_set(&device
->rs_pending_cnt
, 0);
1767 drbd_rs_cancel_all(device
);
1769 drbd_send_uuids(peer_device
);
1770 drbd_send_state(peer_device
, ns
);
1772 /* No point in queuing send_bitmap if we don't have a connection
1773 * anymore, so check also the _current_ state, not only the new state
1774 * at the time this work was queued. */
1775 if (os
.conn
!= C_WF_BITMAP_S
&& ns
.conn
== C_WF_BITMAP_S
&&
1776 device
->state
.conn
== C_WF_BITMAP_S
)
1777 drbd_queue_bitmap_io(device
, &drbd_send_bitmap
, NULL
,
1778 "send_bitmap (WFBitMapS)",
1779 BM_LOCKED_TEST_ALLOWED
);
1781 /* Lost contact to peer's copy of the data */
1782 if (lost_contact_to_peer_data(os
.pdsk
, ns
.pdsk
)) {
1783 if (get_ldev(device
)) {
1784 if ((ns
.role
== R_PRIMARY
|| ns
.peer
== R_PRIMARY
) &&
1785 device
->ldev
->md
.uuid
[UI_BITMAP
] == 0 && ns
.disk
>= D_UP_TO_DATE
) {
1786 if (drbd_suspended(device
)) {
1787 set_bit(NEW_CUR_UUID
, &device
->flags
);
1789 drbd_uuid_new_current(device
);
1790 drbd_send_uuids(peer_device
);
1797 if (ns
.pdsk
< D_INCONSISTENT
&& get_ldev(device
)) {
1798 if (os
.peer
!= R_PRIMARY
&& ns
.peer
== R_PRIMARY
&&
1799 device
->ldev
->md
.uuid
[UI_BITMAP
] == 0 && ns
.disk
>= D_UP_TO_DATE
) {
1800 drbd_uuid_new_current(device
);
1801 drbd_send_uuids(peer_device
);
1803 /* D_DISKLESS Peer becomes secondary */
1804 if (os
.peer
== R_PRIMARY
&& ns
.peer
== R_SECONDARY
)
1805 /* We may still be Primary ourselves.
1806 * No harm done if the bitmap still changes,
1807 * redirtied pages will follow later. */
1808 drbd_bitmap_io_from_worker(device
, &drbd_bm_write
,
1809 "demote diskless peer", BM_LOCKED_SET_ALLOWED
);
1813 /* Write out all changed bits on demote.
1814 * Though, no need to da that just yet
1815 * if there is a resync going on still */
1816 if (os
.role
== R_PRIMARY
&& ns
.role
== R_SECONDARY
&&
1817 device
->state
.conn
<= C_CONNECTED
&& get_ldev(device
)) {
1818 /* No changes to the bitmap expected this time, so assert that,
1819 * even though no harm was done if it did change. */
1820 drbd_bitmap_io_from_worker(device
, &drbd_bm_write
,
1821 "demote", BM_LOCKED_TEST_ALLOWED
);
1825 /* Last part of the attaching process ... */
1826 if (ns
.conn
>= C_CONNECTED
&&
1827 os
.disk
== D_ATTACHING
&& ns
.disk
== D_NEGOTIATING
) {
1828 drbd_send_sizes(peer_device
, 0, 0); /* to start sync... */
1829 drbd_send_uuids(peer_device
);
1830 drbd_send_state(peer_device
, ns
);
1833 /* We want to pause/continue resync, tell peer. */
1834 if (ns
.conn
>= C_CONNECTED
&&
1835 ((os
.aftr_isp
!= ns
.aftr_isp
) ||
1836 (os
.user_isp
!= ns
.user_isp
)))
1837 drbd_send_state(peer_device
, ns
);
1839 /* In case one of the isp bits got set, suspend other devices. */
1840 if ((!os
.aftr_isp
&& !os
.peer_isp
&& !os
.user_isp
) &&
1841 (ns
.aftr_isp
|| ns
.peer_isp
|| ns
.user_isp
))
1842 suspend_other_sg(device
);
1844 /* Make sure the peer gets informed about eventual state
1845 changes (ISP bits) while we were in WFReportParams. */
1846 if (os
.conn
== C_WF_REPORT_PARAMS
&& ns
.conn
>= C_CONNECTED
)
1847 drbd_send_state(peer_device
, ns
);
	if (os.conn != C_AHEAD && ns.conn == C_AHEAD)
		drbd_send_state(peer_device, ns);
	/* We are in the process of starting a full sync... */
	if ((os.conn != C_STARTING_SYNC_T && ns.conn == C_STARTING_SYNC_T) ||
	    (os.conn != C_STARTING_SYNC_S && ns.conn == C_STARTING_SYNC_S))
		/* no other bitmap changes expected during this phase */
		drbd_queue_bitmap_io(device,
			&drbd_bmio_set_n_write, &abw_start_sync,
			"set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED);
	/* first half of local IO error, failure to attach,
	 * or administrative detach */
	if (os.disk != D_FAILED && ns.disk == D_FAILED) {
		enum drbd_io_error_p eh = EP_PASS_ON;
		int was_io_error = 0;
		/* corresponding get_ldev was in _drbd_set_state, to serialize
		 * our cleanup here with the transition to D_DISKLESS.
		 * But it is still not safe to dereference ldev here, since
		 * we might come from a failed Attach before ldev was set. */
		if (device->ldev) {
			rcu_read_lock();
			eh = rcu_dereference(device->ldev->disk_conf)->on_io_error;
			rcu_read_unlock();

			was_io_error = test_and_clear_bit(WAS_IO_ERROR, &device->flags);

			/* Intentionally call this handler first, before drbd_send_state().
			 * See: 2932204 drbd: call local-io-error handler early
			 * People may choose to hard-reset the box from this handler.
			 * It is useful if this looks like a "regular node crash". */
			if (was_io_error && eh == EP_CALL_HELPER)
				drbd_khelper(device, "local-io-error");

			/* Immediately allow completion of all application IO
			 * that waits for completion from the local disk,
			 * if this was a force-detach due to disk_timeout
			 * or administrator request (drbdsetup detach --force).
			 * Do NOT abort otherwise.
			 * Aborting local requests may cause serious problems,
			 * if requests are completed to upper layers already,
			 * and then later the already submitted local bio completes.
			 * This can cause DMA into former bio pages that meanwhile
			 * have been re-used for other things.
			 * So aborting local requests may cause crashes,
			 * or even worse, silent data corruption.
			 */
			if (test_and_clear_bit(FORCE_DETACH, &device->flags))
				tl_abort_disk_io(device);

			/* current state still has to be D_FAILED,
			 * there is only one way out: to D_DISKLESS,
			 * and that may only happen after our put_ldev below. */
			if (device->state.disk != D_FAILED)
				drbd_err(device,
					"ASSERT FAILED: disk is %s during detach\n",
					drbd_disk_str(device->state.disk));

			if (ns.conn >= C_CONNECTED)
				drbd_send_state(peer_device, ns);

			drbd_rs_cancel_all(device);

			/* In case we want to get something to stable storage still,
			 * this may be the last chance.
			 * Following put_ldev may transition to D_DISKLESS. */
			drbd_md_sync(device);
		}
		put_ldev(device);
	}
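
	/*
	 * Detach thus happens in two halves: the D_FAILED branch above does
	 * the cleanup that still needs the local disk (while ldev references
	 * may be held), and the D_DISKLESS branch below runs once local_cnt
	 * has drained, dropping the get_ldev() reference taken for us by
	 * _drbd_set_state() / __drbd_set_state().
	 */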
	/* second half of local IO error, failure to attach,
	 * or administrative detach,
	 * after local_cnt references have reached zero again */
	if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
		/* We must still be diskless,
		 * re-attach has to be serialized with this! */
		if (device->state.disk != D_DISKLESS)
			drbd_err(device,
				 "ASSERT FAILED: disk is %s while going diskless\n",
				 drbd_disk_str(device->state.disk));

		if (ns.conn >= C_CONNECTED)
			drbd_send_state(peer_device, ns);
		/* corresponding get_ldev in __drbd_set_state
		 * this may finally trigger drbd_ldev_destroy. */
		put_ldev(device);
	}
	/* Notify peer that I had a local IO error and did not detach. */
	if (os.disk == D_UP_TO_DATE && ns.disk == D_INCONSISTENT && ns.conn >= C_CONNECTED)
		drbd_send_state(peer_device, ns);
	/* Disks got bigger while they were detached */
	if (ns.disk > D_NEGOTIATING && ns.pdsk > D_NEGOTIATING &&
	    test_and_clear_bit(RESYNC_AFTER_NEG, &device->flags)) {
		if (ns.conn == C_CONNECTED)
			resync_after_online_grow(device);
	}
	/* A resync finished or aborted, wake paused devices... */
	if ((os.conn > C_CONNECTED && ns.conn <= C_CONNECTED) ||
	    (os.peer_isp && !ns.peer_isp) ||
	    (os.user_isp && !ns.user_isp))
		resume_next_sg(device);
	/* sync target done with resync.  Explicitly notify peer, even though
	 * it should (at least for non-empty resyncs) already know itself. */
	if (os.disk < D_UP_TO_DATE && os.conn >= C_SYNC_SOURCE && ns.conn == C_CONNECTED)
		drbd_send_state(peer_device, ns);
	/* Verify finished, or reached stop sector.  Peer did not know about
	 * the stop sector, and we may even have changed the stop sector during
	 * verify to interrupt/stop early.  Send the new state. */
	if (os.conn == C_VERIFY_S && ns.conn == C_CONNECTED
	    && verify_can_do_stop_sector(device))
		drbd_send_state(peer_device, ns);
	/* This triggers bitmap writeout of potentially still unwritten pages
	 * if the resync finished cleanly, or aborted because of peer disk
	 * failure, or on transition from resync back to AHEAD/BEHIND.
	 *
	 * Connection loss is handled in drbd_disconnected() by the receiver.
	 *
	 * For resync aborted because of local disk failure, we cannot do
	 * any bitmap writeout anymore.
	 *
	 * No harm done if some bits change during this phase.
	 */
	if ((os.conn > C_CONNECTED && os.conn < C_AHEAD) &&
	    (ns.conn == C_CONNECTED || ns.conn >= C_AHEAD) && get_ldev(device)) {
		drbd_queue_bitmap_io(device, &drbd_bm_write_copy_pages, NULL,
			"write from resync_finished", BM_LOCKED_CHANGE_ALLOWED);
		put_ldev(device);
	}
	if (ns.disk == D_DISKLESS &&
	    ns.conn == C_STANDALONE &&
	    ns.role == R_SECONDARY) {
		if (os.aftr_isp != ns.aftr_isp)
			resume_next_sg(device);
	}

	drbd_md_sync(device);
}
struct after_conn_state_chg_work {
	struct drbd_work w;
	enum drbd_conns oc;
	union drbd_state ns_min;
	union drbd_state ns_max; /* new, max state, over all devices */
	enum chg_state_flags flags;
	struct drbd_connection *connection;
	struct drbd_state_change *state_change;
};
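
/*
 * Deferred work for the after-effects of a connection state change:
 * _conn_request_state() fills one of these under the req_lock and queues it
 * on sender_work; w_after_conn_state_ch() below then runs from worker
 * context, where blocking (mutexes, network, metadata IO) is allowed.
 */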
static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{
	struct after_conn_state_chg_work *acscw =
		container_of(w, struct after_conn_state_chg_work, w);
	struct drbd_connection *connection = acscw->connection;
	enum drbd_conns oc = acscw->oc;
	union drbd_state ns_max = acscw->ns_max;
	struct drbd_peer_device *peer_device;
	int vnr;

	broadcast_state_change(acscw->state_change);
	forget_state_change(acscw->state_change);
	kfree(acscw);
	/* Upon network configuration, we need to start the receiver */
	if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
		drbd_thread_start(&connection->receiver);

	if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
		struct net_conf *old_conf;

		mutex_lock(&notification_mutex);
		idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
			notify_peer_device_state(NULL, 0, peer_device, NULL,
						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
		notify_connection_state(NULL, 0, connection, NULL, NOTIFY_DESTROY);
		mutex_unlock(&notification_mutex);

		mutex_lock(&connection->resource->conf_update);
		old_conf = connection->net_conf;
		connection->my_addr_len = 0;
		connection->peer_addr_len = 0;
		RCU_INIT_POINTER(connection->net_conf, NULL);
		conn_free_crypto(connection);
		mutex_unlock(&connection->resource->conf_update);

		synchronize_rcu();
		kfree(old_conf);
	}
	if (ns_max.susp_fen) {
		/* case1: The outdate peer handler is successful: */
		if (ns_max.pdsk <= D_OUTDATED) {
			rcu_read_lock();
			idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
				struct drbd_device *device = peer_device->device;
				if (test_bit(NEW_CUR_UUID, &device->flags)) {
					drbd_uuid_new_current(device);
					clear_bit(NEW_CUR_UUID, &device->flags);
				}
			}
			rcu_read_unlock();
			spin_lock_irq(&connection->resource->req_lock);
			_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE);
			spin_unlock_irq(&connection->resource->req_lock);
		}
	}
	kref_put(&connection->kref, drbd_destroy_connection);

	conn_md_sync(connection);

	return 0;
}
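
/*
 * The susp_fen branch above is the thaw path after fencing: once the peer
 * is known to be outdated, requests frozen while IO was suspended are
 * restarted via _tl_restart(), and susp_fen is cleared with a minimal
 * mask/val pair that touches only that one bit.
 */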
static void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf)
{
	enum chg_state_flags flags = ~0;
	struct drbd_peer_device *peer_device;
	int vnr, first_vol = 1;
	union drbd_dev_state os, cs = {
		{ .role = R_SECONDARY,
		  .peer = R_UNKNOWN,
		  .conn = connection->cstate,
		  .disk = D_DISKLESS,
		  .pdsk = D_UNKNOWN,
		} };

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		os = device->state;

		if (first_vol) {
			cs = os;
			first_vol = 0;
			continue;
		}

		if (cs.role != os.role)
			flags &= ~CS_DC_ROLE;

		if (cs.peer != os.peer)
			flags &= ~CS_DC_PEER;

		if (cs.conn != os.conn)
			flags &= ~CS_DC_CONN;

		if (cs.disk != os.disk)
			flags &= ~CS_DC_DISK;

		if (cs.pdsk != os.pdsk)
			flags &= ~CS_DC_PDSK;
	}
	rcu_read_unlock();

	*pf |= CS_DC_MASK;
	*pf &= flags;
	(*pcs).i = cs.i;
}
static enum drbd_state_rv
conn_is_valid_transition(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
			 enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	union drbd_state ns, os;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		os = drbd_read_state(device);
		ns = sanitize_state(device, os, apply_mask_val(os, mask, val), NULL);

		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
			ns.disk = os.disk;

		if (ns.i == os.i)
			continue;

		rv = is_valid_transition(os, ns);

		if (rv >= SS_SUCCESS && !(flags & CS_HARD)) {
			rv = is_valid_state(device, ns);
			if (rv < SS_SUCCESS) {
				if (is_valid_state(device, os) == rv)
					rv = is_valid_soft_transition(os, ns, connection);
			} else
				rv = is_valid_soft_transition(os, ns, connection);
		}

		if (rv < SS_SUCCESS) {
			if (flags & CS_VERBOSE)
				print_st_err(device, os, ns, rv);
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
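
/*
 * Validation is layered: is_valid_transition() enforces hard rules that may
 * never be violated, while the is_valid_state()/is_valid_soft_transition()
 * pair is skipped for CS_HARD requests.  A request is also allowed when it
 * merely keeps an already-invalid state invalid for the same reason (the
 * "is_valid_state(device, os) == rv" case above).
 */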
static void
conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
	       union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
{
	union drbd_state ns, os, ns_max = { };
	union drbd_state ns_min = {
		{ .role = R_MASK,
		  .peer = R_MASK,
		  .conn = val.conn,
		  .disk = D_MASK,
		  .pdsk = D_MASK
		} };
	struct drbd_peer_device *peer_device;
	enum drbd_state_rv rv;
	int vnr, number_of_volumes = 0;

	if (mask.conn == C_MASK) {
		/* remember last connect time so request_timer_fn() won't
		 * kill newly established sessions while we are still trying to thaw
		 * previously frozen IO */
		if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
			connection->last_reconnect_jif = jiffies;

		connection->cstate = val.conn;
	}

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		number_of_volumes++;
		os = drbd_read_state(device);
		ns = apply_mask_val(os, mask, val);
		ns = sanitize_state(device, os, ns, NULL);

		if (flags & CS_IGN_OUTD_FAIL && ns.disk == D_OUTDATED && os.disk < D_OUTDATED)
			ns.disk = os.disk;

		rv = _drbd_set_state(device, ns, flags, NULL);
		BUG_ON(rv < SS_SUCCESS);
		ns.i = device->state.i;
		ns_max.role = max_role(ns.role, ns_max.role);
		ns_max.peer = max_role(ns.peer, ns_max.peer);
		ns_max.conn = max_t(enum drbd_conns, ns.conn, ns_max.conn);
		ns_max.disk = max_t(enum drbd_disk_state, ns.disk, ns_max.disk);
		ns_max.pdsk = max_t(enum drbd_disk_state, ns.pdsk, ns_max.pdsk);

		ns_min.role = min_role(ns.role, ns_min.role);
		ns_min.peer = min_role(ns.peer, ns_min.peer);
		ns_min.conn = min_t(enum drbd_conns, ns.conn, ns_min.conn);
		ns_min.disk = min_t(enum drbd_disk_state, ns.disk, ns_min.disk);
		ns_min.pdsk = min_t(enum drbd_disk_state, ns.pdsk, ns_min.pdsk);
	}
	rcu_read_unlock();

	if (number_of_volumes == 0) {
		ns_min = ns_max = (union drbd_state) { {
				.role = R_SECONDARY,
				.peer = R_UNKNOWN,
				.conn = val.conn,
				.disk = D_DISKLESS,
				.pdsk = D_UNKNOWN
			} };
	}

	ns_min.susp = ns_max.susp = connection->resource->susp;
	ns_min.susp_nod = ns_max.susp_nod = connection->resource->susp_nod;
	ns_min.susp_fen = ns_max.susp_fen = connection->resource->susp_fen;

	*pns_min = ns_min;
	*pns_max = ns_max;
}
static enum drbd_state_rv
_conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
	enum drbd_state_rv err, rv = SS_UNKNOWN_ERROR; /* continue waiting */

	if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
		rv = SS_CW_SUCCESS;

	if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
		rv = SS_CW_FAILED_BY_PEER;

	err = conn_is_valid_transition(connection, mask, val, 0);
	if (err == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
		return rv;

	return err;
}
enum drbd_state_rv
_conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
		    enum chg_state_flags flags)
{
	enum drbd_state_rv rv = SS_SUCCESS;
	struct after_conn_state_chg_work *acscw;
	enum drbd_conns oc = connection->cstate;
	union drbd_state ns_max, ns_min, os;
	bool have_mutex = false;
	struct drbd_state_change *state_change;

	if (mask.conn) {
		rv = is_valid_conn_transition(oc, val.conn);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	rv = conn_is_valid_transition(connection, mask, val, flags);
	if (rv < SS_SUCCESS)
		goto abort;

	if (oc == C_WF_REPORT_PARAMS && val.conn == C_DISCONNECTING &&
	    !(flags & (CS_LOCAL_ONLY | CS_HARD))) {

		/* This will be a cluster-wide state change.
		 * Need to give up the spinlock, grab the mutex,
		 * then send the state change request, ... */
		spin_unlock_irq(&connection->resource->req_lock);
		mutex_lock(&connection->cstate_mutex);
		have_mutex = true;

		set_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
		if (conn_send_state_req(connection, mask, val)) {
			/* sending failed. */
			clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
			rv = SS_CW_FAILED_BY_PEER;
			/* need to re-acquire the spin lock, though */
			goto abort_unlocked;
		}

		if (val.conn == C_DISCONNECTING)
			set_bit(DISCONNECT_SENT, &connection->flags);

		/* ... and re-acquire the spinlock.
		 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
		 * conn_set_state() within the same spinlock. */
		spin_lock_irq(&connection->resource->req_lock);
		wait_event_lock_irq(connection->ping_wait,
				(rv = _conn_rq_cond(connection, mask, val)),
				connection->resource->req_lock);
		clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
		if (rv < SS_SUCCESS)
			goto abort;
	}

	state_change = remember_old_state(connection->resource, GFP_ATOMIC);
	conn_old_common_state(connection, &os, &flags);
	flags |= CS_DC_SUSP;
	conn_set_state(connection, mask, val, &ns_min, &ns_max, flags);
	conn_pr_state_change(connection, os, ns_max, flags);
	remember_new_state(state_change);

	acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
	if (acscw) {
		acscw->oc = os.conn;
		acscw->ns_min = ns_min;
		acscw->ns_max = ns_max;
		acscw->flags = flags;
		acscw->w.cb = w_after_conn_state_ch;
		kref_get(&connection->kref);
		acscw->connection = connection;
		acscw->state_change = state_change;
		drbd_queue_work(&connection->sender_work, &acscw->w);
	} else {
		drbd_err(connection, "Could not kmalloc an acscw\n");
	}

 abort:
	if (have_mutex) {
		/* mutex_unlock() "... must not be used in interrupt context.",
		 * so give up the spinlock, then re-acquire it */
		spin_unlock_irq(&connection->resource->req_lock);
 abort_unlocked:
		mutex_unlock(&connection->cstate_mutex);
		spin_lock_irq(&connection->resource->req_lock);
	}
	if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
		drbd_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
		drbd_err(connection, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
		drbd_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
	}
	return rv;
}
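
/*
 * Summary of the locking dance above (sketch): a cluster-wide disconnect is
 * a two-phase commit.  We drop the req_lock, take cstate_mutex to serialize
 * cluster-wide changes, send the request to the peer, then re-take the
 * req_lock and sleep on ping_wait until the peer's reply (delivered by the
 * receiver side) flips one of the CONN_WD_ST_CHG_* bits.
 */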
enum drbd_state_rv
conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
		   enum chg_state_flags flags)
{
	enum drbd_state_rv rv;

	spin_lock_irq(&connection->resource->req_lock);
	rv = _conn_request_state(connection, mask, val, flags);
	spin_unlock_irq(&connection->resource->req_lock);

	return rv;
}
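
/*
 * Usage (illustrative sketch, not a call site in this file): a caller
 * describes a change as a mask/val pair over union drbd_state; only fields
 * selected by the mask are touched, e.g. to take the whole connection down:
 *
 *	union drbd_state mask = { { .conn = C_MASK } };
 *	union drbd_state val  = { { .conn = C_DISCONNECTING } };
 *
 *	conn_request_state(connection, mask, val, CS_VERBOSE);
 */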