1 // SPDX-License-Identifier: GPL-2.0-only
5 This file is part of DRBD by Philipp Reisner and Lars Ellenberg.
7 Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
8 Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
9 Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
11 Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
12 from Logicworks, Inc. for making SDP replication support possible.
16 #include <linux/drbd_limits.h>
18 #include "drbd_protocol.h"
20 #include "drbd_state_change.h"
22 struct after_state_chg_work
{
24 struct drbd_device
*device
;
27 enum chg_state_flags flags
;
28 struct completion
*done
;
29 struct drbd_state_change
*state_change
;
/* Warnings that sanitize_state() may report back to the caller; printed by
 * print_sanitize_warnings(). NO_WARNING must be first (zero). */
enum sanitize_state_warnings {
	NO_WARNING,
	ABORTED_ONLINE_VERIFY,
	ABORTED_RESYNC,
	CONNECTION_LOST_NEGOTIATING,
	IMPLICITLY_UPGRADED_DISK,
	IMPLICITLY_UPGRADED_PDSK,
};
41 static void count_objects(struct drbd_resource
*resource
,
42 unsigned int *n_devices
,
43 unsigned int *n_connections
)
45 struct drbd_device
*device
;
46 struct drbd_connection
*connection
;
52 idr_for_each_entry(&resource
->devices
, device
, vnr
)
54 for_each_connection(connection
, resource
)
58 static struct drbd_state_change
*alloc_state_change(unsigned int n_devices
, unsigned int n_connections
, gfp_t gfp
)
60 struct drbd_state_change
*state_change
;
63 size
= sizeof(struct drbd_state_change
) +
64 n_devices
* sizeof(struct drbd_device_state_change
) +
65 n_connections
* sizeof(struct drbd_connection_state_change
) +
66 n_devices
* n_connections
* sizeof(struct drbd_peer_device_state_change
);
67 state_change
= kmalloc(size
, gfp
);
70 state_change
->n_devices
= n_devices
;
71 state_change
->n_connections
= n_connections
;
72 state_change
->devices
= (void *)(state_change
+ 1);
73 state_change
->connections
= (void *)&state_change
->devices
[n_devices
];
74 state_change
->peer_devices
= (void *)&state_change
->connections
[n_connections
];
75 state_change
->resource
->resource
= NULL
;
76 for (n
= 0; n
< n_devices
; n
++)
77 state_change
->devices
[n
].device
= NULL
;
78 for (n
= 0; n
< n_connections
; n
++)
79 state_change
->connections
[n
].connection
= NULL
;
83 struct drbd_state_change
*remember_old_state(struct drbd_resource
*resource
, gfp_t gfp
)
85 struct drbd_state_change
*state_change
;
86 struct drbd_device
*device
;
87 unsigned int n_devices
;
88 struct drbd_connection
*connection
;
89 unsigned int n_connections
;
92 struct drbd_device_state_change
*device_state_change
;
93 struct drbd_peer_device_state_change
*peer_device_state_change
;
94 struct drbd_connection_state_change
*connection_state_change
;
96 /* Caller holds req_lock spinlock.
97 * No state, no device IDR, no connections lists can change. */
98 count_objects(resource
, &n_devices
, &n_connections
);
99 state_change
= alloc_state_change(n_devices
, n_connections
, gfp
);
103 kref_get(&resource
->kref
);
104 state_change
->resource
->resource
= resource
;
105 state_change
->resource
->role
[OLD
] =
106 conn_highest_role(first_connection(resource
));
107 state_change
->resource
->susp
[OLD
] = resource
->susp
;
108 state_change
->resource
->susp_nod
[OLD
] = resource
->susp_nod
;
109 state_change
->resource
->susp_fen
[OLD
] = resource
->susp_fen
;
111 connection_state_change
= state_change
->connections
;
112 for_each_connection(connection
, resource
) {
113 kref_get(&connection
->kref
);
114 connection_state_change
->connection
= connection
;
115 connection_state_change
->cstate
[OLD
] =
117 connection_state_change
->peer_role
[OLD
] =
118 conn_highest_peer(connection
);
119 connection_state_change
++;
122 device_state_change
= state_change
->devices
;
123 peer_device_state_change
= state_change
->peer_devices
;
124 idr_for_each_entry(&resource
->devices
, device
, vnr
) {
125 kref_get(&device
->kref
);
126 device_state_change
->device
= device
;
127 device_state_change
->disk_state
[OLD
] = device
->state
.disk
;
129 /* The peer_devices for each device have to be enumerated in
130 the order of the connections. We may not use for_each_peer_device() here. */
131 for_each_connection(connection
, resource
) {
132 struct drbd_peer_device
*peer_device
;
134 peer_device
= conn_peer_device(connection
, device
->vnr
);
135 peer_device_state_change
->peer_device
= peer_device
;
136 peer_device_state_change
->disk_state
[OLD
] =
138 peer_device_state_change
->repl_state
[OLD
] =
139 max_t(enum drbd_conns
,
140 C_WF_REPORT_PARAMS
, device
->state
.conn
);
141 peer_device_state_change
->resync_susp_user
[OLD
] =
142 device
->state
.user_isp
;
143 peer_device_state_change
->resync_susp_peer
[OLD
] =
144 device
->state
.peer_isp
;
145 peer_device_state_change
->resync_susp_dependency
[OLD
] =
146 device
->state
.aftr_isp
;
147 peer_device_state_change
++;
149 device_state_change
++;
155 static void remember_new_state(struct drbd_state_change
*state_change
)
157 struct drbd_resource_state_change
*resource_state_change
;
158 struct drbd_resource
*resource
;
164 resource_state_change
= &state_change
->resource
[0];
165 resource
= resource_state_change
->resource
;
167 resource_state_change
->role
[NEW
] =
168 conn_highest_role(first_connection(resource
));
169 resource_state_change
->susp
[NEW
] = resource
->susp
;
170 resource_state_change
->susp_nod
[NEW
] = resource
->susp_nod
;
171 resource_state_change
->susp_fen
[NEW
] = resource
->susp_fen
;
173 for (n
= 0; n
< state_change
->n_devices
; n
++) {
174 struct drbd_device_state_change
*device_state_change
=
175 &state_change
->devices
[n
];
176 struct drbd_device
*device
= device_state_change
->device
;
178 device_state_change
->disk_state
[NEW
] = device
->state
.disk
;
181 for (n
= 0; n
< state_change
->n_connections
; n
++) {
182 struct drbd_connection_state_change
*connection_state_change
=
183 &state_change
->connections
[n
];
184 struct drbd_connection
*connection
=
185 connection_state_change
->connection
;
187 connection_state_change
->cstate
[NEW
] = connection
->cstate
;
188 connection_state_change
->peer_role
[NEW
] =
189 conn_highest_peer(connection
);
192 for (n
= 0; n
< state_change
->n_devices
* state_change
->n_connections
; n
++) {
193 struct drbd_peer_device_state_change
*peer_device_state_change
=
194 &state_change
->peer_devices
[n
];
195 struct drbd_device
*device
=
196 peer_device_state_change
->peer_device
->device
;
197 union drbd_dev_state state
= device
->state
;
199 peer_device_state_change
->disk_state
[NEW
] = state
.pdsk
;
200 peer_device_state_change
->repl_state
[NEW
] =
201 max_t(enum drbd_conns
, C_WF_REPORT_PARAMS
, state
.conn
);
202 peer_device_state_change
->resync_susp_user
[NEW
] =
204 peer_device_state_change
->resync_susp_peer
[NEW
] =
206 peer_device_state_change
->resync_susp_dependency
[NEW
] =
211 void copy_old_to_new_state_change(struct drbd_state_change
*state_change
)
213 struct drbd_resource_state_change
*resource_state_change
= &state_change
->resource
[0];
214 unsigned int n_device
, n_connection
, n_peer_device
, n_peer_devices
;
216 #define OLD_TO_NEW(x) \
219 OLD_TO_NEW(resource_state_change
->role
);
220 OLD_TO_NEW(resource_state_change
->susp
);
221 OLD_TO_NEW(resource_state_change
->susp_nod
);
222 OLD_TO_NEW(resource_state_change
->susp_fen
);
224 for (n_connection
= 0; n_connection
< state_change
->n_connections
; n_connection
++) {
225 struct drbd_connection_state_change
*connection_state_change
=
226 &state_change
->connections
[n_connection
];
228 OLD_TO_NEW(connection_state_change
->peer_role
);
229 OLD_TO_NEW(connection_state_change
->cstate
);
232 for (n_device
= 0; n_device
< state_change
->n_devices
; n_device
++) {
233 struct drbd_device_state_change
*device_state_change
=
234 &state_change
->devices
[n_device
];
236 OLD_TO_NEW(device_state_change
->disk_state
);
239 n_peer_devices
= state_change
->n_devices
* state_change
->n_connections
;
240 for (n_peer_device
= 0; n_peer_device
< n_peer_devices
; n_peer_device
++) {
241 struct drbd_peer_device_state_change
*p
=
242 &state_change
->peer_devices
[n_peer_device
];
244 OLD_TO_NEW(p
->disk_state
);
245 OLD_TO_NEW(p
->repl_state
);
246 OLD_TO_NEW(p
->resync_susp_user
);
247 OLD_TO_NEW(p
->resync_susp_peer
);
248 OLD_TO_NEW(p
->resync_susp_dependency
);
254 void forget_state_change(struct drbd_state_change
*state_change
)
261 if (state_change
->resource
->resource
)
262 kref_put(&state_change
->resource
->resource
->kref
, drbd_destroy_resource
);
263 for (n
= 0; n
< state_change
->n_devices
; n
++) {
264 struct drbd_device
*device
= state_change
->devices
[n
].device
;
267 kref_put(&device
->kref
, drbd_destroy_device
);
269 for (n
= 0; n
< state_change
->n_connections
; n
++) {
270 struct drbd_connection
*connection
=
271 state_change
->connections
[n
].connection
;
274 kref_put(&connection
->kref
, drbd_destroy_connection
);
279 static int w_after_state_ch(struct drbd_work
*w
, int unused
);
280 static void after_state_ch(struct drbd_device
*device
, union drbd_state os
,
281 union drbd_state ns
, enum chg_state_flags flags
,
282 struct drbd_state_change
*);
283 static enum drbd_state_rv
is_valid_state(struct drbd_device
*, union drbd_state
);
284 static enum drbd_state_rv
is_valid_soft_transition(union drbd_state
, union drbd_state
, struct drbd_connection
*);
285 static enum drbd_state_rv
is_valid_transition(union drbd_state os
, union drbd_state ns
);
286 static union drbd_state
sanitize_state(struct drbd_device
*device
, union drbd_state os
,
287 union drbd_state ns
, enum sanitize_state_warnings
*warn
);
289 static inline bool is_susp(union drbd_state s
)
291 return s
.susp
|| s
.susp_nod
|| s
.susp_fen
;
294 bool conn_all_vols_unconf(struct drbd_connection
*connection
)
296 struct drbd_peer_device
*peer_device
;
301 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
302 struct drbd_device
*device
= peer_device
->device
;
303 if (device
->state
.disk
!= D_DISKLESS
||
304 device
->state
.conn
!= C_STANDALONE
||
305 device
->state
.role
!= R_SECONDARY
) {
315 /* Unfortunately the states where not correctly ordered, when
316 they where defined. therefore can not use max_t() here. */
317 static enum drbd_role
max_role(enum drbd_role role1
, enum drbd_role role2
)
319 if (role1
== R_PRIMARY
|| role2
== R_PRIMARY
)
321 if (role1
== R_SECONDARY
|| role2
== R_SECONDARY
)
326 static enum drbd_role
min_role(enum drbd_role role1
, enum drbd_role role2
)
328 if (role1
== R_UNKNOWN
|| role2
== R_UNKNOWN
)
330 if (role1
== R_SECONDARY
|| role2
== R_SECONDARY
)
335 enum drbd_role
conn_highest_role(struct drbd_connection
*connection
)
337 enum drbd_role role
= R_SECONDARY
;
338 struct drbd_peer_device
*peer_device
;
342 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
343 struct drbd_device
*device
= peer_device
->device
;
344 role
= max_role(role
, device
->state
.role
);
351 enum drbd_role
conn_highest_peer(struct drbd_connection
*connection
)
353 enum drbd_role peer
= R_UNKNOWN
;
354 struct drbd_peer_device
*peer_device
;
358 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
359 struct drbd_device
*device
= peer_device
->device
;
360 peer
= max_role(peer
, device
->state
.peer
);
367 enum drbd_disk_state
conn_highest_disk(struct drbd_connection
*connection
)
369 enum drbd_disk_state disk_state
= D_DISKLESS
;
370 struct drbd_peer_device
*peer_device
;
374 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
375 struct drbd_device
*device
= peer_device
->device
;
376 disk_state
= max_t(enum drbd_disk_state
, disk_state
, device
->state
.disk
);
383 enum drbd_disk_state
conn_lowest_disk(struct drbd_connection
*connection
)
385 enum drbd_disk_state disk_state
= D_MASK
;
386 struct drbd_peer_device
*peer_device
;
390 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
391 struct drbd_device
*device
= peer_device
->device
;
392 disk_state
= min_t(enum drbd_disk_state
, disk_state
, device
->state
.disk
);
399 enum drbd_disk_state
conn_highest_pdsk(struct drbd_connection
*connection
)
401 enum drbd_disk_state disk_state
= D_DISKLESS
;
402 struct drbd_peer_device
*peer_device
;
406 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
407 struct drbd_device
*device
= peer_device
->device
;
408 disk_state
= max_t(enum drbd_disk_state
, disk_state
, device
->state
.pdsk
);
415 enum drbd_conns
conn_lowest_conn(struct drbd_connection
*connection
)
417 enum drbd_conns conn
= C_MASK
;
418 struct drbd_peer_device
*peer_device
;
422 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
423 struct drbd_device
*device
= peer_device
->device
;
424 conn
= min_t(enum drbd_conns
, conn
, device
->state
.conn
);
431 static bool no_peer_wf_report_params(struct drbd_connection
*connection
)
433 struct drbd_peer_device
*peer_device
;
438 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
)
439 if (peer_device
->device
->state
.conn
== C_WF_REPORT_PARAMS
) {
448 static void wake_up_all_devices(struct drbd_connection
*connection
)
450 struct drbd_peer_device
*peer_device
;
454 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
)
455 wake_up(&peer_device
->device
->state_wait
);
462 * cl_wide_st_chg() - true if the state change is a cluster wide one
463 * @device: DRBD device.
464 * @os: old (current) state.
465 * @ns: new (wanted) state.
467 static int cl_wide_st_chg(struct drbd_device
*device
,
468 union drbd_state os
, union drbd_state ns
)
470 return (os
.conn
>= C_CONNECTED
&& ns
.conn
>= C_CONNECTED
&&
471 ((os
.role
!= R_PRIMARY
&& ns
.role
== R_PRIMARY
) ||
472 (os
.conn
!= C_STARTING_SYNC_T
&& ns
.conn
== C_STARTING_SYNC_T
) ||
473 (os
.conn
!= C_STARTING_SYNC_S
&& ns
.conn
== C_STARTING_SYNC_S
) ||
474 (os
.disk
!= D_FAILED
&& ns
.disk
== D_FAILED
))) ||
475 (os
.conn
>= C_CONNECTED
&& ns
.conn
== C_DISCONNECTING
) ||
476 (os
.conn
== C_CONNECTED
&& ns
.conn
== C_VERIFY_S
) ||
477 (os
.conn
== C_CONNECTED
&& ns
.conn
== C_WF_REPORT_PARAMS
);
480 static union drbd_state
481 apply_mask_val(union drbd_state os
, union drbd_state mask
, union drbd_state val
)
484 ns
.i
= (os
.i
& ~mask
.i
) | val
.i
;
489 drbd_change_state(struct drbd_device
*device
, enum chg_state_flags f
,
490 union drbd_state mask
, union drbd_state val
)
494 enum drbd_state_rv rv
;
496 spin_lock_irqsave(&device
->resource
->req_lock
, flags
);
497 ns
= apply_mask_val(drbd_read_state(device
), mask
, val
);
498 rv
= _drbd_set_state(device
, ns
, f
, NULL
);
499 spin_unlock_irqrestore(&device
->resource
->req_lock
, flags
);
505 * drbd_force_state() - Impose a change which happens outside our control on our state
506 * @device: DRBD device.
507 * @mask: mask of state bits to change.
508 * @val: value of new state bits.
510 void drbd_force_state(struct drbd_device
*device
,
511 union drbd_state mask
, union drbd_state val
)
513 drbd_change_state(device
, CS_HARD
, mask
, val
);
516 static enum drbd_state_rv
517 _req_st_cond(struct drbd_device
*device
, union drbd_state mask
,
518 union drbd_state val
)
520 union drbd_state os
, ns
;
522 enum drbd_state_rv rv
;
524 if (test_and_clear_bit(CL_ST_CHG_SUCCESS
, &device
->flags
))
525 return SS_CW_SUCCESS
;
527 if (test_and_clear_bit(CL_ST_CHG_FAIL
, &device
->flags
))
528 return SS_CW_FAILED_BY_PEER
;
530 spin_lock_irqsave(&device
->resource
->req_lock
, flags
);
531 os
= drbd_read_state(device
);
532 ns
= sanitize_state(device
, os
, apply_mask_val(os
, mask
, val
), NULL
);
533 rv
= is_valid_transition(os
, ns
);
534 if (rv
>= SS_SUCCESS
)
535 rv
= SS_UNKNOWN_ERROR
; /* cont waiting, otherwise fail. */
537 if (!cl_wide_st_chg(device
, os
, ns
))
539 if (rv
== SS_UNKNOWN_ERROR
) {
540 rv
= is_valid_state(device
, ns
);
541 if (rv
>= SS_SUCCESS
) {
542 rv
= is_valid_soft_transition(os
, ns
, first_peer_device(device
)->connection
);
543 if (rv
>= SS_SUCCESS
)
544 rv
= SS_UNKNOWN_ERROR
; /* cont waiting, otherwise fail. */
547 spin_unlock_irqrestore(&device
->resource
->req_lock
, flags
);
553 * drbd_req_state() - Perform an eventually cluster wide state change
554 * @device: DRBD device.
555 * @mask: mask of state bits to change.
556 * @val: value of new state bits.
559 * Should not be called directly, use drbd_request_state() or
560 * _drbd_request_state().
562 static enum drbd_state_rv
563 drbd_req_state(struct drbd_device
*device
, union drbd_state mask
,
564 union drbd_state val
, enum chg_state_flags f
)
566 struct completion done
;
568 union drbd_state os
, ns
;
569 enum drbd_state_rv rv
;
572 init_completion(&done
);
574 if (f
& CS_SERIALIZE
)
575 mutex_lock(device
->state_mutex
);
576 if (f
& CS_INHIBIT_MD_IO
)
577 buffer
= drbd_md_get_buffer(device
, __func__
);
579 spin_lock_irqsave(&device
->resource
->req_lock
, flags
);
580 os
= drbd_read_state(device
);
581 ns
= sanitize_state(device
, os
, apply_mask_val(os
, mask
, val
), NULL
);
582 rv
= is_valid_transition(os
, ns
);
583 if (rv
< SS_SUCCESS
) {
584 spin_unlock_irqrestore(&device
->resource
->req_lock
, flags
);
588 if (cl_wide_st_chg(device
, os
, ns
)) {
589 rv
= is_valid_state(device
, ns
);
590 if (rv
== SS_SUCCESS
)
591 rv
= is_valid_soft_transition(os
, ns
, first_peer_device(device
)->connection
);
592 spin_unlock_irqrestore(&device
->resource
->req_lock
, flags
);
594 if (rv
< SS_SUCCESS
) {
596 print_st_err(device
, os
, ns
, rv
);
600 if (drbd_send_state_req(first_peer_device(device
), mask
, val
)) {
601 rv
= SS_CW_FAILED_BY_PEER
;
603 print_st_err(device
, os
, ns
, rv
);
607 wait_event(device
->state_wait
,
608 (rv
= _req_st_cond(device
, mask
, val
)));
610 if (rv
< SS_SUCCESS
) {
612 print_st_err(device
, os
, ns
, rv
);
615 spin_lock_irqsave(&device
->resource
->req_lock
, flags
);
616 ns
= apply_mask_val(drbd_read_state(device
), mask
, val
);
617 rv
= _drbd_set_state(device
, ns
, f
, &done
);
619 rv
= _drbd_set_state(device
, ns
, f
, &done
);
622 spin_unlock_irqrestore(&device
->resource
->req_lock
, flags
);
624 if (f
& CS_WAIT_COMPLETE
&& rv
== SS_SUCCESS
) {
625 D_ASSERT(device
, current
!= first_peer_device(device
)->connection
->worker
.task
);
626 wait_for_completion(&done
);
631 drbd_md_put_buffer(device
);
632 if (f
& CS_SERIALIZE
)
633 mutex_unlock(device
->state_mutex
);
639 * _drbd_request_state() - Request a state change (with flags)
640 * @device: DRBD device.
641 * @mask: mask of state bits to change.
642 * @val: value of new state bits.
645 * Cousin of drbd_request_state(), useful with the CS_WAIT_COMPLETE
646 * flag, or when logging of failed state change requests is not desired.
649 _drbd_request_state(struct drbd_device
*device
, union drbd_state mask
,
650 union drbd_state val
, enum chg_state_flags f
)
652 enum drbd_state_rv rv
;
654 wait_event(device
->state_wait
,
655 (rv
= drbd_req_state(device
, mask
, val
, f
)) != SS_IN_TRANSIENT_STATE
);
661 * We grab drbd_md_get_buffer(), because we don't want to "fail" the disk while
662 * there is IO in-flight: the transition into D_FAILED for detach purposes
663 * may get misinterpreted as actual IO error in a confused endio function.
665 * We wrap it all into wait_event(), to retry in case the drbd_req_state()
666 * returns SS_IN_TRANSIENT_STATE.
668 * To avoid potential deadlock with e.g. the receiver thread trying to grab
669 * drbd_md_get_buffer() while trying to get out of the "transient state", we
670 * need to grab and release the meta data buffer inside of that wait_event loop.
672 static enum drbd_state_rv
673 request_detach(struct drbd_device
*device
)
675 return drbd_req_state(device
, NS(disk
, D_FAILED
),
676 CS_VERBOSE
| CS_ORDERED
| CS_INHIBIT_MD_IO
);
679 int drbd_request_detach_interruptible(struct drbd_device
*device
)
683 drbd_suspend_io(device
); /* so no-one is stuck in drbd_al_begin_io */
684 wait_event_interruptible(device
->state_wait
,
685 (rv
= request_detach(device
)) != SS_IN_TRANSIENT_STATE
);
686 drbd_resume_io(device
);
688 ret
= wait_event_interruptible(device
->misc_wait
,
689 device
->state
.disk
!= D_FAILED
);
691 if (rv
== SS_IS_DISKLESS
)
692 rv
= SS_NOTHING_TO_DO
;
700 _drbd_request_state_holding_state_mutex(struct drbd_device
*device
, union drbd_state mask
,
701 union drbd_state val
, enum chg_state_flags f
)
703 enum drbd_state_rv rv
;
705 BUG_ON(f
& CS_SERIALIZE
);
707 wait_event_cmd(device
->state_wait
,
708 (rv
= drbd_req_state(device
, mask
, val
, f
)) != SS_IN_TRANSIENT_STATE
,
709 mutex_unlock(device
->state_mutex
),
710 mutex_lock(device
->state_mutex
));
715 static void print_st(struct drbd_device
*device
, const char *name
, union drbd_state ns
)
717 drbd_err(device
, " %s = { cs:%s ro:%s/%s ds:%s/%s %c%c%c%c%c%c }\n",
719 drbd_conn_str(ns
.conn
),
720 drbd_role_str(ns
.role
),
721 drbd_role_str(ns
.peer
),
722 drbd_disk_str(ns
.disk
),
723 drbd_disk_str(ns
.pdsk
),
724 is_susp(ns
) ? 's' : 'r',
725 ns
.aftr_isp
? 'a' : '-',
726 ns
.peer_isp
? 'p' : '-',
727 ns
.user_isp
? 'u' : '-',
728 ns
.susp_fen
? 'F' : '-',
729 ns
.susp_nod
? 'N' : '-'
733 void print_st_err(struct drbd_device
*device
, union drbd_state os
,
734 union drbd_state ns
, enum drbd_state_rv err
)
736 if (err
== SS_IN_TRANSIENT_STATE
)
738 drbd_err(device
, "State change failed: %s\n", drbd_set_st_err_str(err
));
739 print_st(device
, " state", os
);
740 print_st(device
, "wanted", ns
);
743 static long print_state_change(char *pb
, union drbd_state os
, union drbd_state ns
,
744 enum chg_state_flags flags
)
750 if (ns
.role
!= os
.role
&& flags
& CS_DC_ROLE
)
751 pbp
+= sprintf(pbp
, "role( %s -> %s ) ",
752 drbd_role_str(os
.role
),
753 drbd_role_str(ns
.role
));
754 if (ns
.peer
!= os
.peer
&& flags
& CS_DC_PEER
)
755 pbp
+= sprintf(pbp
, "peer( %s -> %s ) ",
756 drbd_role_str(os
.peer
),
757 drbd_role_str(ns
.peer
));
758 if (ns
.conn
!= os
.conn
&& flags
& CS_DC_CONN
)
759 pbp
+= sprintf(pbp
, "conn( %s -> %s ) ",
760 drbd_conn_str(os
.conn
),
761 drbd_conn_str(ns
.conn
));
762 if (ns
.disk
!= os
.disk
&& flags
& CS_DC_DISK
)
763 pbp
+= sprintf(pbp
, "disk( %s -> %s ) ",
764 drbd_disk_str(os
.disk
),
765 drbd_disk_str(ns
.disk
));
766 if (ns
.pdsk
!= os
.pdsk
&& flags
& CS_DC_PDSK
)
767 pbp
+= sprintf(pbp
, "pdsk( %s -> %s ) ",
768 drbd_disk_str(os
.pdsk
),
769 drbd_disk_str(ns
.pdsk
));
774 static void drbd_pr_state_change(struct drbd_device
*device
, union drbd_state os
, union drbd_state ns
,
775 enum chg_state_flags flags
)
780 pbp
+= print_state_change(pbp
, os
, ns
, flags
^ CS_DC_MASK
);
782 if (ns
.aftr_isp
!= os
.aftr_isp
)
783 pbp
+= sprintf(pbp
, "aftr_isp( %d -> %d ) ",
786 if (ns
.peer_isp
!= os
.peer_isp
)
787 pbp
+= sprintf(pbp
, "peer_isp( %d -> %d ) ",
790 if (ns
.user_isp
!= os
.user_isp
)
791 pbp
+= sprintf(pbp
, "user_isp( %d -> %d ) ",
796 drbd_info(device
, "%s\n", pb
);
799 static void conn_pr_state_change(struct drbd_connection
*connection
, union drbd_state os
, union drbd_state ns
,
800 enum chg_state_flags flags
)
805 pbp
+= print_state_change(pbp
, os
, ns
, flags
);
807 if (is_susp(ns
) != is_susp(os
) && flags
& CS_DC_SUSP
)
808 pbp
+= sprintf(pbp
, "susp( %d -> %d ) ",
813 drbd_info(connection
, "%s\n", pb
);
818 * is_valid_state() - Returns an SS_ error code if ns is not valid
819 * @device: DRBD device.
820 * @ns: State to consider.
822 static enum drbd_state_rv
823 is_valid_state(struct drbd_device
*device
, union drbd_state ns
)
825 /* See drbd_state_sw_errors in drbd_strings.c */
827 enum drbd_fencing_p fp
;
828 enum drbd_state_rv rv
= SS_SUCCESS
;
833 if (get_ldev(device
)) {
834 fp
= rcu_dereference(device
->ldev
->disk_conf
)->fencing
;
838 nc
= rcu_dereference(first_peer_device(device
)->connection
->net_conf
);
840 if (!nc
->two_primaries
&& ns
.role
== R_PRIMARY
) {
841 if (ns
.peer
== R_PRIMARY
)
842 rv
= SS_TWO_PRIMARIES
;
843 else if (conn_highest_peer(first_peer_device(device
)->connection
) == R_PRIMARY
)
844 rv
= SS_O_VOL_PEER_PRI
;
849 goto out
; /* already found a reason to abort */
850 else if (ns
.role
== R_SECONDARY
&& device
->open_cnt
)
851 rv
= SS_DEVICE_IN_USE
;
853 else if (ns
.role
== R_PRIMARY
&& ns
.conn
< C_CONNECTED
&& ns
.disk
< D_UP_TO_DATE
)
854 rv
= SS_NO_UP_TO_DATE_DISK
;
856 else if (fp
>= FP_RESOURCE
&&
857 ns
.role
== R_PRIMARY
&& ns
.conn
< C_CONNECTED
&& ns
.pdsk
>= D_UNKNOWN
)
860 else if (ns
.role
== R_PRIMARY
&& ns
.disk
<= D_INCONSISTENT
&& ns
.pdsk
<= D_INCONSISTENT
)
861 rv
= SS_NO_UP_TO_DATE_DISK
;
863 else if (ns
.conn
> C_CONNECTED
&& ns
.disk
< D_INCONSISTENT
)
864 rv
= SS_NO_LOCAL_DISK
;
866 else if (ns
.conn
> C_CONNECTED
&& ns
.pdsk
< D_INCONSISTENT
)
867 rv
= SS_NO_REMOTE_DISK
;
869 else if (ns
.conn
> C_CONNECTED
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
)
870 rv
= SS_NO_UP_TO_DATE_DISK
;
872 else if ((ns
.conn
== C_CONNECTED
||
873 ns
.conn
== C_WF_BITMAP_S
||
874 ns
.conn
== C_SYNC_SOURCE
||
875 ns
.conn
== C_PAUSED_SYNC_S
) &&
876 ns
.disk
== D_OUTDATED
)
877 rv
= SS_CONNECTED_OUTDATES
;
879 else if (nc
&& (ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) &&
880 (nc
->verify_alg
[0] == 0))
881 rv
= SS_NO_VERIFY_ALG
;
883 else if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) &&
884 first_peer_device(device
)->connection
->agreed_pro_version
< 88)
885 rv
= SS_NOT_SUPPORTED
;
887 else if (ns
.role
== R_PRIMARY
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
)
888 rv
= SS_NO_UP_TO_DATE_DISK
;
890 else if ((ns
.conn
== C_STARTING_SYNC_S
|| ns
.conn
== C_STARTING_SYNC_T
) &&
891 ns
.pdsk
== D_UNKNOWN
)
892 rv
= SS_NEED_CONNECTION
;
894 else if (ns
.conn
>= C_CONNECTED
&& ns
.pdsk
== D_UNKNOWN
)
895 rv
= SS_CONNECTED_OUTDATES
;
904 * is_valid_soft_transition() - Returns an SS_ error code if the state transition is not possible
905 * This function limits state transitions that may be declined by DRBD. I.e.
906 * user requests (aka soft transitions).
909 * @connection: DRBD connection.
911 static enum drbd_state_rv
912 is_valid_soft_transition(union drbd_state os
, union drbd_state ns
, struct drbd_connection
*connection
)
914 enum drbd_state_rv rv
= SS_SUCCESS
;
916 if ((ns
.conn
== C_STARTING_SYNC_T
|| ns
.conn
== C_STARTING_SYNC_S
) &&
917 os
.conn
> C_CONNECTED
)
918 rv
= SS_RESYNC_RUNNING
;
920 if (ns
.conn
== C_DISCONNECTING
&& os
.conn
== C_STANDALONE
)
921 rv
= SS_ALREADY_STANDALONE
;
923 if (ns
.disk
> D_ATTACHING
&& os
.disk
== D_DISKLESS
)
926 if (ns
.conn
== C_WF_CONNECTION
&& os
.conn
< C_UNCONNECTED
)
927 rv
= SS_NO_NET_CONFIG
;
929 if (ns
.disk
== D_OUTDATED
&& os
.disk
< D_OUTDATED
&& os
.disk
!= D_ATTACHING
)
930 rv
= SS_LOWER_THAN_OUTDATED
;
932 if (ns
.conn
== C_DISCONNECTING
&& os
.conn
== C_UNCONNECTED
)
933 rv
= SS_IN_TRANSIENT_STATE
;
935 /* While establishing a connection only allow cstate to change.
936 Delay/refuse role changes, detach attach etc... (they do not touch cstate) */
937 if (test_bit(STATE_SENT
, &connection
->flags
) &&
938 !((ns
.conn
== C_WF_REPORT_PARAMS
&& os
.conn
== C_WF_CONNECTION
) ||
939 (ns
.conn
>= C_CONNECTED
&& os
.conn
== C_WF_REPORT_PARAMS
)))
940 rv
= SS_IN_TRANSIENT_STATE
;
942 /* Do not promote during resync handshake triggered by "force primary".
943 * This is a hack. It should really be rejected by the peer during the
944 * cluster wide state change request. */
945 if (os
.role
!= R_PRIMARY
&& ns
.role
== R_PRIMARY
946 && ns
.pdsk
== D_UP_TO_DATE
947 && ns
.disk
!= D_UP_TO_DATE
&& ns
.disk
!= D_DISKLESS
948 && (ns
.conn
<= C_WF_SYNC_UUID
|| ns
.conn
!= os
.conn
))
949 rv
= SS_IN_TRANSIENT_STATE
;
951 if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) && os
.conn
< C_CONNECTED
)
952 rv
= SS_NEED_CONNECTION
;
954 if ((ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
) &&
955 ns
.conn
!= os
.conn
&& os
.conn
> C_CONNECTED
)
956 rv
= SS_RESYNC_RUNNING
;
958 if ((ns
.conn
== C_STARTING_SYNC_S
|| ns
.conn
== C_STARTING_SYNC_T
) &&
959 os
.conn
< C_CONNECTED
)
960 rv
= SS_NEED_CONNECTION
;
962 if ((ns
.conn
== C_SYNC_TARGET
|| ns
.conn
== C_SYNC_SOURCE
)
963 && os
.conn
< C_WF_REPORT_PARAMS
)
964 rv
= SS_NEED_CONNECTION
; /* No NetworkFailure -> SyncTarget etc... */
966 if (ns
.conn
== C_DISCONNECTING
&& ns
.pdsk
== D_OUTDATED
&&
967 os
.conn
< C_CONNECTED
&& os
.pdsk
> D_OUTDATED
)
968 rv
= SS_OUTDATE_WO_CONN
;
973 static enum drbd_state_rv
974 is_valid_conn_transition(enum drbd_conns oc
, enum drbd_conns nc
)
976 /* no change -> nothing to do, at least for the connection part */
978 return SS_NOTHING_TO_DO
;
980 /* disconnect of an unconfigured connection does not make sense */
981 if (oc
== C_STANDALONE
&& nc
== C_DISCONNECTING
)
982 return SS_ALREADY_STANDALONE
;
984 /* from C_STANDALONE, we start with C_UNCONNECTED */
985 if (oc
== C_STANDALONE
&& nc
!= C_UNCONNECTED
)
986 return SS_NEED_CONNECTION
;
988 /* When establishing a connection we need to go through WF_REPORT_PARAMS!
989 Necessary to do the right thing upon invalidate-remote on a disconnected resource */
990 if (oc
< C_WF_REPORT_PARAMS
&& nc
>= C_CONNECTED
)
991 return SS_NEED_CONNECTION
;
993 /* After a network error only C_UNCONNECTED or C_DISCONNECTING may follow. */
994 if (oc
>= C_TIMEOUT
&& oc
<= C_TEAR_DOWN
&& nc
!= C_UNCONNECTED
&& nc
!= C_DISCONNECTING
)
995 return SS_IN_TRANSIENT_STATE
;
997 /* After C_DISCONNECTING only C_STANDALONE may follow */
998 if (oc
== C_DISCONNECTING
&& nc
!= C_STANDALONE
)
999 return SS_IN_TRANSIENT_STATE
;
1006 * is_valid_transition() - Returns an SS_ error code if the state transition is not possible
1007 * This limits hard state transitions. Hard state transitions are facts there are
1008 * imposed on DRBD by the environment. E.g. disk broke or network broke down.
1009 * But those hard state transitions are still not allowed to do everything.
1013 static enum drbd_state_rv
1014 is_valid_transition(union drbd_state os
, union drbd_state ns
)
1016 enum drbd_state_rv rv
;
1018 rv
= is_valid_conn_transition(os
.conn
, ns
.conn
);
1020 /* we cannot fail (again) if we already detached */
1021 if (ns
.disk
== D_FAILED
&& os
.disk
== D_DISKLESS
)
1022 rv
= SS_IS_DISKLESS
;
1027 static void print_sanitize_warnings(struct drbd_device
*device
, enum sanitize_state_warnings warn
)
1029 static const char *msg_table
[] = {
1031 [ABORTED_ONLINE_VERIFY
] = "Online-verify aborted.",
1032 [ABORTED_RESYNC
] = "Resync aborted.",
1033 [CONNECTION_LOST_NEGOTIATING
] = "Connection lost while negotiating, no data!",
1034 [IMPLICITLY_UPGRADED_DISK
] = "Implicitly upgraded disk",
1035 [IMPLICITLY_UPGRADED_PDSK
] = "Implicitly upgraded pdsk",
1038 if (warn
!= NO_WARNING
)
1039 drbd_warn(device
, "%s\n", msg_table
[warn
]);
1043 * sanitize_state() - Resolves implicitly necessary additional changes to a state transition
1044 * @device: DRBD device.
1047 * @warn: placeholder for returned state warning.
1049 * When we loose connection, we have to set the state of the peers disk (pdsk)
1050 * to D_UNKNOWN. This rule and many more along those lines are in this function.
1052 static union drbd_state
sanitize_state(struct drbd_device
*device
, union drbd_state os
,
1053 union drbd_state ns
, enum sanitize_state_warnings
*warn
)
1055 enum drbd_fencing_p fp
;
1056 enum drbd_disk_state disk_min
, disk_max
, pdsk_min
, pdsk_max
;
1062 if (get_ldev(device
)) {
1064 fp
= rcu_dereference(device
->ldev
->disk_conf
)->fencing
;
1069 /* Implications from connection to peer and peer_isp */
1070 if (ns
.conn
< C_CONNECTED
) {
1072 ns
.peer
= R_UNKNOWN
;
1073 if (ns
.pdsk
> D_UNKNOWN
|| ns
.pdsk
< D_INCONSISTENT
)
1074 ns
.pdsk
= D_UNKNOWN
;
1077 /* Clear the aftr_isp when becoming unconfigured */
1078 if (ns
.conn
== C_STANDALONE
&& ns
.disk
== D_DISKLESS
&& ns
.role
== R_SECONDARY
)
1081 /* An implication of the disk states onto the connection state */
1082 /* Abort resync if a disk fails/detaches */
1083 if (ns
.conn
> C_CONNECTED
&& (ns
.disk
<= D_FAILED
|| ns
.pdsk
<= D_FAILED
)) {
1085 *warn
= ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
?
1086 ABORTED_ONLINE_VERIFY
: ABORTED_RESYNC
;
1087 ns
.conn
= C_CONNECTED
;
1090 /* Connection breaks down before we finished "Negotiating" */
1091 if (ns
.conn
< C_CONNECTED
&& ns
.disk
== D_NEGOTIATING
&&
1092 get_ldev_if_state(device
, D_NEGOTIATING
)) {
1093 if (device
->ed_uuid
== device
->ldev
->md
.uuid
[UI_CURRENT
]) {
1094 ns
.disk
= device
->new_state_tmp
.disk
;
1095 ns
.pdsk
= device
->new_state_tmp
.pdsk
;
1098 *warn
= CONNECTION_LOST_NEGOTIATING
;
1099 ns
.disk
= D_DISKLESS
;
1100 ns
.pdsk
= D_UNKNOWN
;
1105 /* D_CONSISTENT and D_OUTDATED vanish when we get connected */
1106 if (ns
.conn
>= C_CONNECTED
&& ns
.conn
< C_AHEAD
) {
1107 if (ns
.disk
== D_CONSISTENT
|| ns
.disk
== D_OUTDATED
)
1108 ns
.disk
= D_UP_TO_DATE
;
1109 if (ns
.pdsk
== D_CONSISTENT
|| ns
.pdsk
== D_OUTDATED
)
1110 ns
.pdsk
= D_UP_TO_DATE
;
1113 /* Implications of the connection state on the disk states */
1114 disk_min
= D_DISKLESS
;
1115 disk_max
= D_UP_TO_DATE
;
1116 pdsk_min
= D_INCONSISTENT
;
1117 pdsk_max
= D_UNKNOWN
;
1118 switch ((enum drbd_conns
)ns
.conn
) {
1120 case C_PAUSED_SYNC_T
:
1121 case C_STARTING_SYNC_T
:
1122 case C_WF_SYNC_UUID
:
1124 disk_min
= D_INCONSISTENT
;
1125 disk_max
= D_OUTDATED
;
1126 pdsk_min
= D_UP_TO_DATE
;
1127 pdsk_max
= D_UP_TO_DATE
;
1131 disk_min
= D_UP_TO_DATE
;
1132 disk_max
= D_UP_TO_DATE
;
1133 pdsk_min
= D_UP_TO_DATE
;
1134 pdsk_max
= D_UP_TO_DATE
;
1137 disk_min
= D_DISKLESS
;
1138 disk_max
= D_UP_TO_DATE
;
1139 pdsk_min
= D_DISKLESS
;
1140 pdsk_max
= D_UP_TO_DATE
;
1143 case C_PAUSED_SYNC_S
:
1144 case C_STARTING_SYNC_S
:
1146 disk_min
= D_UP_TO_DATE
;
1147 disk_max
= D_UP_TO_DATE
;
1148 pdsk_min
= D_INCONSISTENT
;
1149 pdsk_max
= D_CONSISTENT
; /* D_OUTDATED would be nice. But explicit outdate necessary*/
1152 disk_min
= D_INCONSISTENT
;
1153 disk_max
= D_INCONSISTENT
;
1154 pdsk_min
= D_UP_TO_DATE
;
1155 pdsk_max
= D_UP_TO_DATE
;
1158 disk_min
= D_UP_TO_DATE
;
1159 disk_max
= D_UP_TO_DATE
;
1160 pdsk_min
= D_INCONSISTENT
;
1161 pdsk_max
= D_INCONSISTENT
;
1164 case C_DISCONNECTING
:
1168 case C_NETWORK_FAILURE
:
1169 case C_PROTOCOL_ERROR
:
1171 case C_WF_CONNECTION
:
1172 case C_WF_REPORT_PARAMS
:
1176 if (ns
.disk
> disk_max
)
1179 if (ns
.disk
< disk_min
) {
1181 *warn
= IMPLICITLY_UPGRADED_DISK
;
1184 if (ns
.pdsk
> pdsk_max
)
1187 if (ns
.pdsk
< pdsk_min
) {
1189 *warn
= IMPLICITLY_UPGRADED_PDSK
;
1193 if (fp
== FP_STONITH
&&
1194 (ns
.role
== R_PRIMARY
&& ns
.conn
< C_CONNECTED
&& ns
.pdsk
> D_OUTDATED
) &&
1195 !(os
.role
== R_PRIMARY
&& os
.conn
< C_CONNECTED
&& os
.pdsk
> D_OUTDATED
))
1196 ns
.susp_fen
= 1; /* Suspend IO while fence-peer handler runs (peer lost) */
1198 if (device
->resource
->res_opts
.on_no_data
== OND_SUSPEND_IO
&&
1199 (ns
.role
== R_PRIMARY
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
) &&
1200 !(os
.role
== R_PRIMARY
&& os
.disk
< D_UP_TO_DATE
&& os
.pdsk
< D_UP_TO_DATE
))
1201 ns
.susp_nod
= 1; /* Suspend IO while no data available (no accessible data available) */
1203 if (ns
.aftr_isp
|| ns
.peer_isp
|| ns
.user_isp
) {
1204 if (ns
.conn
== C_SYNC_SOURCE
)
1205 ns
.conn
= C_PAUSED_SYNC_S
;
1206 if (ns
.conn
== C_SYNC_TARGET
)
1207 ns
.conn
= C_PAUSED_SYNC_T
;
1209 if (ns
.conn
== C_PAUSED_SYNC_S
)
1210 ns
.conn
= C_SYNC_SOURCE
;
1211 if (ns
.conn
== C_PAUSED_SYNC_T
)
1212 ns
.conn
= C_SYNC_TARGET
;
1218 void drbd_resume_al(struct drbd_device
*device
)
1220 if (test_and_clear_bit(AL_SUSPENDED
, &device
->flags
))
1221 drbd_info(device
, "Resumed AL updates\n");
1224 /* helper for _drbd_set_state */
1225 static void set_ov_position(struct drbd_peer_device
*peer_device
, enum drbd_conns cs
)
1227 struct drbd_device
*device
= peer_device
->device
;
1229 if (peer_device
->connection
->agreed_pro_version
< 90)
1230 device
->ov_start_sector
= 0;
1231 device
->rs_total
= drbd_bm_bits(device
);
1232 device
->ov_position
= 0;
1233 if (cs
== C_VERIFY_T
) {
1234 /* starting online verify from an arbitrary position
1235 * does not fit well into the existing protocol.
1236 * on C_VERIFY_T, we initialize ov_left and friends
1237 * implicitly in receive_DataRequest once the
1238 * first P_OV_REQUEST is received */
1239 device
->ov_start_sector
= ~(sector_t
)0;
1241 unsigned long bit
= BM_SECT_TO_BIT(device
->ov_start_sector
);
1242 if (bit
>= device
->rs_total
) {
1243 device
->ov_start_sector
=
1244 BM_BIT_TO_SECT(device
->rs_total
- 1);
1245 device
->rs_total
= 1;
1247 device
->rs_total
-= bit
;
1248 device
->ov_position
= device
->ov_start_sector
;
1250 device
->ov_left
= device
->rs_total
;
1254 * _drbd_set_state() - Set a new DRBD state
1255 * @device: DRBD device.
1258 * @done: Optional completion, that will get completed after the after_state_ch() finished
1260 * Caller needs to hold req_lock. Do not call directly.
1263 _drbd_set_state(struct drbd_device
*device
, union drbd_state ns
,
1264 enum chg_state_flags flags
, struct completion
*done
)
1266 struct drbd_peer_device
*peer_device
= first_peer_device(device
);
1267 struct drbd_connection
*connection
= peer_device
? peer_device
->connection
: NULL
;
1268 union drbd_state os
;
1269 enum drbd_state_rv rv
= SS_SUCCESS
;
1270 enum sanitize_state_warnings ssw
;
1271 struct after_state_chg_work
*ascw
;
1272 struct drbd_state_change
*state_change
;
1274 os
= drbd_read_state(device
);
1276 ns
= sanitize_state(device
, os
, ns
, &ssw
);
1278 return SS_NOTHING_TO_DO
;
1280 rv
= is_valid_transition(os
, ns
);
1281 if (rv
< SS_SUCCESS
)
1284 if (!(flags
& CS_HARD
)) {
1285 /* pre-state-change checks ; only look at ns */
1286 /* See drbd_state_sw_errors in drbd_strings.c */
1288 rv
= is_valid_state(device
, ns
);
1289 if (rv
< SS_SUCCESS
) {
1290 /* If the old state was illegal as well, then let
1293 if (is_valid_state(device
, os
) == rv
)
1294 rv
= is_valid_soft_transition(os
, ns
, connection
);
1296 rv
= is_valid_soft_transition(os
, ns
, connection
);
1299 if (rv
< SS_SUCCESS
) {
1300 if (flags
& CS_VERBOSE
)
1301 print_st_err(device
, os
, ns
, rv
);
1305 print_sanitize_warnings(device
, ssw
);
1307 drbd_pr_state_change(device
, os
, ns
, flags
);
1309 /* Display changes to the susp* flags that where caused by the call to
1310 sanitize_state(). Only display it here if we where not called from
1311 _conn_request_state() */
1312 if (!(flags
& CS_DC_SUSP
))
1313 conn_pr_state_change(connection
, os
, ns
,
1314 (flags
& ~CS_DC_MASK
) | CS_DC_SUSP
);
1316 /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
1317 * on the ldev here, to be sure the transition -> D_DISKLESS resp.
1318 * drbd_ldev_destroy() won't happen before our corresponding
1319 * after_state_ch works run, where we put_ldev again. */
1320 if ((os
.disk
!= D_FAILED
&& ns
.disk
== D_FAILED
) ||
1321 (os
.disk
!= D_DISKLESS
&& ns
.disk
== D_DISKLESS
))
1322 atomic_inc(&device
->local_cnt
);
1324 if (!is_sync_state(os
.conn
) && is_sync_state(ns
.conn
))
1325 clear_bit(RS_DONE
, &device
->flags
);
1327 /* FIXME: Have any flags been set earlier in this function already? */
1328 state_change
= remember_old_state(device
->resource
, GFP_ATOMIC
);
1330 /* changes to local_cnt and device flags should be visible before
1331 * changes to state, which again should be visible before anything else
1332 * depending on that change happens. */
1334 device
->state
.i
= ns
.i
;
1335 device
->resource
->susp
= ns
.susp
;
1336 device
->resource
->susp_nod
= ns
.susp_nod
;
1337 device
->resource
->susp_fen
= ns
.susp_fen
;
1340 remember_new_state(state_change
);
1342 /* put replicated vs not-replicated requests in seperate epochs */
1343 if (drbd_should_do_remote((union drbd_dev_state
)os
.i
) !=
1344 drbd_should_do_remote((union drbd_dev_state
)ns
.i
))
1345 start_new_tl_epoch(connection
);
1347 if (os
.disk
== D_ATTACHING
&& ns
.disk
>= D_NEGOTIATING
)
1348 drbd_print_uuids(device
, "attached to UUIDs");
1350 /* Wake up role changes, that were delayed because of connection establishing */
1351 if (os
.conn
== C_WF_REPORT_PARAMS
&& ns
.conn
!= C_WF_REPORT_PARAMS
&&
1352 no_peer_wf_report_params(connection
)) {
1353 clear_bit(STATE_SENT
, &connection
->flags
);
1354 wake_up_all_devices(connection
);
1357 wake_up(&device
->misc_wait
);
1358 wake_up(&device
->state_wait
);
1359 wake_up(&connection
->ping_wait
);
1361 /* Aborted verify run, or we reached the stop sector.
1362 * Log the last position, unless end-of-device. */
1363 if ((os
.conn
== C_VERIFY_S
|| os
.conn
== C_VERIFY_T
) &&
1364 ns
.conn
<= C_CONNECTED
) {
1365 device
->ov_start_sector
=
1366 BM_BIT_TO_SECT(drbd_bm_bits(device
) - device
->ov_left
);
1367 if (device
->ov_left
)
1368 drbd_info(device
, "Online Verify reached sector %llu\n",
1369 (unsigned long long)device
->ov_start_sector
);
1372 if ((os
.conn
== C_PAUSED_SYNC_T
|| os
.conn
== C_PAUSED_SYNC_S
) &&
1373 (ns
.conn
== C_SYNC_TARGET
|| ns
.conn
== C_SYNC_SOURCE
)) {
1374 drbd_info(device
, "Syncer continues.\n");
1375 device
->rs_paused
+= (long)jiffies
1376 -(long)device
->rs_mark_time
[device
->rs_last_mark
];
1377 if (ns
.conn
== C_SYNC_TARGET
)
1378 mod_timer(&device
->resync_timer
, jiffies
);
1381 if ((os
.conn
== C_SYNC_TARGET
|| os
.conn
== C_SYNC_SOURCE
) &&
1382 (ns
.conn
== C_PAUSED_SYNC_T
|| ns
.conn
== C_PAUSED_SYNC_S
)) {
1383 drbd_info(device
, "Resync suspended\n");
1384 device
->rs_mark_time
[device
->rs_last_mark
] = jiffies
;
1387 if (os
.conn
== C_CONNECTED
&&
1388 (ns
.conn
== C_VERIFY_S
|| ns
.conn
== C_VERIFY_T
)) {
1389 unsigned long now
= jiffies
;
1392 set_ov_position(peer_device
, ns
.conn
);
1393 device
->rs_start
= now
;
1394 device
->rs_last_sect_ev
= 0;
1395 device
->ov_last_oos_size
= 0;
1396 device
->ov_last_oos_start
= 0;
1398 for (i
= 0; i
< DRBD_SYNC_MARKS
; i
++) {
1399 device
->rs_mark_left
[i
] = device
->ov_left
;
1400 device
->rs_mark_time
[i
] = now
;
1403 drbd_rs_controller_reset(peer_device
);
1405 if (ns
.conn
== C_VERIFY_S
) {
1406 drbd_info(device
, "Starting Online Verify from sector %llu\n",
1407 (unsigned long long)device
->ov_position
);
1408 mod_timer(&device
->resync_timer
, jiffies
);
1412 if (get_ldev(device
)) {
1413 u32 mdf
= device
->ldev
->md
.flags
& ~(MDF_CONSISTENT
|MDF_PRIMARY_IND
|
1414 MDF_CONNECTED_IND
|MDF_WAS_UP_TO_DATE
|
1415 MDF_PEER_OUT_DATED
|MDF_CRASHED_PRIMARY
);
1417 mdf
&= ~MDF_AL_CLEAN
;
1418 if (test_bit(CRASHED_PRIMARY
, &device
->flags
))
1419 mdf
|= MDF_CRASHED_PRIMARY
;
1420 if (device
->state
.role
== R_PRIMARY
||
1421 (device
->state
.pdsk
< D_INCONSISTENT
&& device
->state
.peer
== R_PRIMARY
))
1422 mdf
|= MDF_PRIMARY_IND
;
1423 if (device
->state
.conn
> C_WF_REPORT_PARAMS
)
1424 mdf
|= MDF_CONNECTED_IND
;
1425 if (device
->state
.disk
> D_INCONSISTENT
)
1426 mdf
|= MDF_CONSISTENT
;
1427 if (device
->state
.disk
> D_OUTDATED
)
1428 mdf
|= MDF_WAS_UP_TO_DATE
;
1429 if (device
->state
.pdsk
<= D_OUTDATED
&& device
->state
.pdsk
>= D_INCONSISTENT
)
1430 mdf
|= MDF_PEER_OUT_DATED
;
1431 if (mdf
!= device
->ldev
->md
.flags
) {
1432 device
->ldev
->md
.flags
= mdf
;
1433 drbd_md_mark_dirty(device
);
1435 if (os
.disk
< D_CONSISTENT
&& ns
.disk
>= D_CONSISTENT
)
1436 drbd_set_ed_uuid(device
, device
->ldev
->md
.uuid
[UI_CURRENT
]);
1440 /* Peer was forced D_UP_TO_DATE & R_PRIMARY, consider to resync */
1441 if (os
.disk
== D_INCONSISTENT
&& os
.pdsk
== D_INCONSISTENT
&&
1442 os
.peer
== R_SECONDARY
&& ns
.peer
== R_PRIMARY
)
1443 set_bit(CONSIDER_RESYNC
, &device
->flags
);
1445 /* Receiver should clean up itself */
1446 if (os
.conn
!= C_DISCONNECTING
&& ns
.conn
== C_DISCONNECTING
)
1447 drbd_thread_stop_nowait(&connection
->receiver
);
1449 /* Now the receiver finished cleaning up itself, it should die */
1450 if (os
.conn
!= C_STANDALONE
&& ns
.conn
== C_STANDALONE
)
1451 drbd_thread_stop_nowait(&connection
->receiver
);
1453 /* Upon network failure, we need to restart the receiver. */
1454 if (os
.conn
> C_WF_CONNECTION
&&
1455 ns
.conn
<= C_TEAR_DOWN
&& ns
.conn
>= C_TIMEOUT
)
1456 drbd_thread_restart_nowait(&connection
->receiver
);
1458 /* Resume AL writing if we get a connection */
1459 if (os
.conn
< C_CONNECTED
&& ns
.conn
>= C_CONNECTED
) {
1460 drbd_resume_al(device
);
1461 connection
->connect_cnt
++;
1464 /* remember last attach time so request_timer_fn() won't
1465 * kill newly established sessions while we are still trying to thaw
1466 * previously frozen IO */
1467 if ((os
.disk
== D_ATTACHING
|| os
.disk
== D_NEGOTIATING
) &&
1468 ns
.disk
> D_NEGOTIATING
)
1469 device
->last_reattach_jif
= jiffies
;
1471 ascw
= kmalloc(sizeof(*ascw
), GFP_ATOMIC
);
1475 ascw
->flags
= flags
;
1476 ascw
->w
.cb
= w_after_state_ch
;
1477 ascw
->device
= device
;
1479 ascw
->state_change
= state_change
;
1480 drbd_queue_work(&connection
->sender_work
,
1483 drbd_err(device
, "Could not kmalloc an ascw\n");
1489 static int w_after_state_ch(struct drbd_work
*w
, int unused
)
1491 struct after_state_chg_work
*ascw
=
1492 container_of(w
, struct after_state_chg_work
, w
);
1493 struct drbd_device
*device
= ascw
->device
;
1495 after_state_ch(device
, ascw
->os
, ascw
->ns
, ascw
->flags
, ascw
->state_change
);
1496 forget_state_change(ascw
->state_change
);
1497 if (ascw
->flags
& CS_WAIT_COMPLETE
)
1498 complete(ascw
->done
);
1504 static void abw_start_sync(struct drbd_device
*device
, int rv
)
1507 drbd_err(device
, "Writing the bitmap failed not starting resync.\n");
1508 _drbd_request_state(device
, NS(conn
, C_CONNECTED
), CS_VERBOSE
);
1512 switch (device
->state
.conn
) {
1513 case C_STARTING_SYNC_T
:
1514 _drbd_request_state(device
, NS(conn
, C_WF_SYNC_UUID
), CS_VERBOSE
);
1516 case C_STARTING_SYNC_S
:
1517 drbd_start_resync(device
, C_SYNC_SOURCE
);
1522 int drbd_bitmap_io_from_worker(struct drbd_device
*device
,
1523 int (*io_fn
)(struct drbd_device
*, struct drbd_peer_device
*),
1524 char *why
, enum bm_flag flags
,
1525 struct drbd_peer_device
*peer_device
)
1529 D_ASSERT(device
, current
== first_peer_device(device
)->connection
->worker
.task
);
1531 /* open coded non-blocking drbd_suspend_io(device); */
1532 atomic_inc(&device
->suspend_cnt
);
1534 drbd_bm_lock(device
, why
, flags
);
1535 rv
= io_fn(device
, peer_device
);
1536 drbd_bm_unlock(device
);
1538 drbd_resume_io(device
);
1543 int notify_resource_state_change(struct sk_buff
*skb
,
1546 enum drbd_notification_type type
)
1548 struct drbd_resource_state_change
*resource_state_change
= state_change
;
1549 struct drbd_resource
*resource
= resource_state_change
->resource
;
1550 struct resource_info resource_info
= {
1551 .res_role
= resource_state_change
->role
[NEW
],
1552 .res_susp
= resource_state_change
->susp
[NEW
],
1553 .res_susp_nod
= resource_state_change
->susp_nod
[NEW
],
1554 .res_susp_fen
= resource_state_change
->susp_fen
[NEW
],
1557 return notify_resource_state(skb
, seq
, resource
, &resource_info
, type
);
1560 int notify_connection_state_change(struct sk_buff
*skb
,
1563 enum drbd_notification_type type
)
1565 struct drbd_connection_state_change
*p
= state_change
;
1566 struct drbd_connection
*connection
= p
->connection
;
1567 struct connection_info connection_info
= {
1568 .conn_connection_state
= p
->cstate
[NEW
],
1569 .conn_role
= p
->peer_role
[NEW
],
1572 return notify_connection_state(skb
, seq
, connection
, &connection_info
, type
);
1575 int notify_device_state_change(struct sk_buff
*skb
,
1578 enum drbd_notification_type type
)
1580 struct drbd_device_state_change
*device_state_change
= state_change
;
1581 struct drbd_device
*device
= device_state_change
->device
;
1582 struct device_info device_info
= {
1583 .dev_disk_state
= device_state_change
->disk_state
[NEW
],
1586 return notify_device_state(skb
, seq
, device
, &device_info
, type
);
1589 int notify_peer_device_state_change(struct sk_buff
*skb
,
1592 enum drbd_notification_type type
)
1594 struct drbd_peer_device_state_change
*p
= state_change
;
1595 struct drbd_peer_device
*peer_device
= p
->peer_device
;
1596 struct peer_device_info peer_device_info
= {
1597 .peer_repl_state
= p
->repl_state
[NEW
],
1598 .peer_disk_state
= p
->disk_state
[NEW
],
1599 .peer_resync_susp_user
= p
->resync_susp_user
[NEW
],
1600 .peer_resync_susp_peer
= p
->resync_susp_peer
[NEW
],
1601 .peer_resync_susp_dependency
= p
->resync_susp_dependency
[NEW
],
1604 return notify_peer_device_state(skb
, seq
, peer_device
, &peer_device_info
, type
);
1607 static void broadcast_state_change(struct drbd_state_change
*state_change
)
1609 struct drbd_resource_state_change
*resource_state_change
= &state_change
->resource
[0];
1610 bool resource_state_has_changed
;
1611 unsigned int n_device
, n_connection
, n_peer_device
, n_peer_devices
;
1612 int (*last_func
)(struct sk_buff
*, unsigned int,
1613 void *, enum drbd_notification_type
) = NULL
;
1614 void *last_arg
= NULL
;
1616 #define HAS_CHANGED(state) ((state)[OLD] != (state)[NEW])
1617 #define FINAL_STATE_CHANGE(type) \
1619 last_func(NULL, 0, last_arg, type); \
1621 #define REMEMBER_STATE_CHANGE(func, arg, type) \
1622 ({ FINAL_STATE_CHANGE(type | NOTIFY_CONTINUES); \
1627 mutex_lock(¬ification_mutex
);
1629 resource_state_has_changed
=
1630 HAS_CHANGED(resource_state_change
->role
) ||
1631 HAS_CHANGED(resource_state_change
->susp
) ||
1632 HAS_CHANGED(resource_state_change
->susp_nod
) ||
1633 HAS_CHANGED(resource_state_change
->susp_fen
);
1635 if (resource_state_has_changed
)
1636 REMEMBER_STATE_CHANGE(notify_resource_state_change
,
1637 resource_state_change
, NOTIFY_CHANGE
);
1639 for (n_connection
= 0; n_connection
< state_change
->n_connections
; n_connection
++) {
1640 struct drbd_connection_state_change
*connection_state_change
=
1641 &state_change
->connections
[n_connection
];
1643 if (HAS_CHANGED(connection_state_change
->peer_role
) ||
1644 HAS_CHANGED(connection_state_change
->cstate
))
1645 REMEMBER_STATE_CHANGE(notify_connection_state_change
,
1646 connection_state_change
, NOTIFY_CHANGE
);
1649 for (n_device
= 0; n_device
< state_change
->n_devices
; n_device
++) {
1650 struct drbd_device_state_change
*device_state_change
=
1651 &state_change
->devices
[n_device
];
1653 if (HAS_CHANGED(device_state_change
->disk_state
))
1654 REMEMBER_STATE_CHANGE(notify_device_state_change
,
1655 device_state_change
, NOTIFY_CHANGE
);
1658 n_peer_devices
= state_change
->n_devices
* state_change
->n_connections
;
1659 for (n_peer_device
= 0; n_peer_device
< n_peer_devices
; n_peer_device
++) {
1660 struct drbd_peer_device_state_change
*p
=
1661 &state_change
->peer_devices
[n_peer_device
];
1663 if (HAS_CHANGED(p
->disk_state
) ||
1664 HAS_CHANGED(p
->repl_state
) ||
1665 HAS_CHANGED(p
->resync_susp_user
) ||
1666 HAS_CHANGED(p
->resync_susp_peer
) ||
1667 HAS_CHANGED(p
->resync_susp_dependency
))
1668 REMEMBER_STATE_CHANGE(notify_peer_device_state_change
,
1672 FINAL_STATE_CHANGE(NOTIFY_CHANGE
);
1673 mutex_unlock(¬ification_mutex
);
1676 #undef FINAL_STATE_CHANGE
1677 #undef REMEMBER_STATE_CHANGE
1680 /* takes old and new peer disk state */
1681 static bool lost_contact_to_peer_data(enum drbd_disk_state os
, enum drbd_disk_state ns
)
1683 if ((os
>= D_INCONSISTENT
&& os
!= D_UNKNOWN
&& os
!= D_OUTDATED
)
1684 && (ns
< D_INCONSISTENT
|| ns
== D_UNKNOWN
|| ns
== D_OUTDATED
))
1687 /* Scenario, starting with normal operation
1688 * Connected Primary/Secondary UpToDate/UpToDate
1689 * NetworkFailure Primary/Unknown UpToDate/DUnknown (frozen)
1691 * Connected Primary/Secondary UpToDate/Diskless (resumed; needs to bump uuid!)
1694 && (ns
== D_DISKLESS
|| ns
== D_FAILED
|| ns
== D_OUTDATED
))
1701 * after_state_ch() - Perform after state change actions that may sleep
1702 * @device: DRBD device.
1706 * @state_change: state change to broadcast
1708 static void after_state_ch(struct drbd_device
*device
, union drbd_state os
,
1709 union drbd_state ns
, enum chg_state_flags flags
,
1710 struct drbd_state_change
*state_change
)
1712 struct drbd_resource
*resource
= device
->resource
;
1713 struct drbd_peer_device
*peer_device
= first_peer_device(device
);
1714 struct drbd_connection
*connection
= peer_device
? peer_device
->connection
: NULL
;
1715 struct sib_info sib
;
1717 broadcast_state_change(state_change
);
1719 sib
.sib_reason
= SIB_STATE_CHANGE
;
1723 if ((os
.disk
!= D_UP_TO_DATE
|| os
.pdsk
!= D_UP_TO_DATE
)
1724 && (ns
.disk
== D_UP_TO_DATE
&& ns
.pdsk
== D_UP_TO_DATE
)) {
1725 clear_bit(CRASHED_PRIMARY
, &device
->flags
);
1727 device
->p_uuid
[UI_FLAGS
] &= ~((u64
)2);
1730 /* Inform userspace about the change... */
1731 drbd_bcast_event(device
, &sib
);
1733 if (!(os
.role
== R_PRIMARY
&& os
.disk
< D_UP_TO_DATE
&& os
.pdsk
< D_UP_TO_DATE
) &&
1734 (ns
.role
== R_PRIMARY
&& ns
.disk
< D_UP_TO_DATE
&& ns
.pdsk
< D_UP_TO_DATE
))
1735 drbd_khelper(device
, "pri-on-incon-degr");
1737 /* Here we have the actions that are performed after a
1738 state change. This function might sleep */
1741 enum drbd_req_event what
= NOTHING
;
1743 spin_lock_irq(&device
->resource
->req_lock
);
1744 if (os
.conn
< C_CONNECTED
&& conn_lowest_conn(connection
) >= C_CONNECTED
)
1747 if ((os
.disk
== D_ATTACHING
|| os
.disk
== D_NEGOTIATING
) &&
1748 conn_lowest_disk(connection
) == D_UP_TO_DATE
)
1749 what
= RESTART_FROZEN_DISK_IO
;
1751 if (resource
->susp_nod
&& what
!= NOTHING
) {
1752 _tl_restart(connection
, what
);
1753 _conn_request_state(connection
,
1754 (union drbd_state
) { { .susp_nod
= 1 } },
1755 (union drbd_state
) { { .susp_nod
= 0 } },
1758 spin_unlock_irq(&device
->resource
->req_lock
);
1762 spin_lock_irq(&device
->resource
->req_lock
);
1763 if (resource
->susp_fen
&& conn_lowest_conn(connection
) >= C_CONNECTED
) {
1764 /* case2: The connection was established again: */
1765 struct drbd_peer_device
*peer_device
;
1769 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
)
1770 clear_bit(NEW_CUR_UUID
, &peer_device
->device
->flags
);
1773 /* We should actively create a new uuid, _before_
1774 * we resume/resent, if the peer is diskless
1775 * (recovery from a multiple error scenario).
1776 * Currently, this happens with a slight delay
1777 * below when checking lost_contact_to_peer_data() ...
1779 _tl_restart(connection
, RESEND
);
1780 _conn_request_state(connection
,
1781 (union drbd_state
) { { .susp_fen
= 1 } },
1782 (union drbd_state
) { { .susp_fen
= 0 } },
1785 spin_unlock_irq(&device
->resource
->req_lock
);
1788 /* Became sync source. With protocol >= 96, we still need to send out
1789 * the sync uuid now. Need to do that before any drbd_send_state, or
1790 * the other side may go "paused sync" before receiving the sync uuids,
1791 * which is unexpected. */
1792 if ((os
.conn
!= C_SYNC_SOURCE
&& os
.conn
!= C_PAUSED_SYNC_S
) &&
1793 (ns
.conn
== C_SYNC_SOURCE
|| ns
.conn
== C_PAUSED_SYNC_S
) &&
1794 connection
->agreed_pro_version
>= 96 && get_ldev(device
)) {
1795 drbd_gen_and_send_sync_uuid(peer_device
);
1799 /* Do not change the order of the if above and the two below... */
1800 if (os
.pdsk
== D_DISKLESS
&&
1801 ns
.pdsk
> D_DISKLESS
&& ns
.pdsk
!= D_UNKNOWN
) { /* attach on the peer */
1802 /* we probably will start a resync soon.
1803 * make sure those things are properly reset. */
1804 device
->rs_total
= 0;
1805 device
->rs_failed
= 0;
1806 atomic_set(&device
->rs_pending_cnt
, 0);
1807 drbd_rs_cancel_all(device
);
1809 drbd_send_uuids(peer_device
);
1810 drbd_send_state(peer_device
, ns
);
1812 /* No point in queuing send_bitmap if we don't have a connection
1813 * anymore, so check also the _current_ state, not only the new state
1814 * at the time this work was queued. */
1815 if (os
.conn
!= C_WF_BITMAP_S
&& ns
.conn
== C_WF_BITMAP_S
&&
1816 device
->state
.conn
== C_WF_BITMAP_S
)
1817 drbd_queue_bitmap_io(device
, &drbd_send_bitmap
, NULL
,
1818 "send_bitmap (WFBitMapS)",
1819 BM_LOCKED_TEST_ALLOWED
, peer_device
);
1821 /* Lost contact to peer's copy of the data */
1822 if (lost_contact_to_peer_data(os
.pdsk
, ns
.pdsk
)) {
1823 if (get_ldev(device
)) {
1824 if ((ns
.role
== R_PRIMARY
|| ns
.peer
== R_PRIMARY
) &&
1825 device
->ldev
->md
.uuid
[UI_BITMAP
] == 0 && ns
.disk
>= D_UP_TO_DATE
) {
1826 if (drbd_suspended(device
)) {
1827 set_bit(NEW_CUR_UUID
, &device
->flags
);
1829 drbd_uuid_new_current(device
);
1830 drbd_send_uuids(peer_device
);
1837 if (ns
.pdsk
< D_INCONSISTENT
&& get_ldev(device
)) {
1838 if (os
.peer
!= R_PRIMARY
&& ns
.peer
== R_PRIMARY
&&
1839 device
->ldev
->md
.uuid
[UI_BITMAP
] == 0 && ns
.disk
>= D_UP_TO_DATE
) {
1840 drbd_uuid_new_current(device
);
1841 drbd_send_uuids(peer_device
);
1843 /* D_DISKLESS Peer becomes secondary */
1844 if (os
.peer
== R_PRIMARY
&& ns
.peer
== R_SECONDARY
)
1845 /* We may still be Primary ourselves.
1846 * No harm done if the bitmap still changes,
1847 * redirtied pages will follow later. */
1848 drbd_bitmap_io_from_worker(device
, &drbd_bm_write
,
1849 "demote diskless peer", BM_LOCKED_SET_ALLOWED
, peer_device
);
1853 /* Write out all changed bits on demote.
1854 * Though, no need to da that just yet
1855 * if there is a resync going on still */
1856 if (os
.role
== R_PRIMARY
&& ns
.role
== R_SECONDARY
&&
1857 device
->state
.conn
<= C_CONNECTED
&& get_ldev(device
)) {
1858 /* No changes to the bitmap expected this time, so assert that,
1859 * even though no harm was done if it did change. */
1860 drbd_bitmap_io_from_worker(device
, &drbd_bm_write
,
1861 "demote", BM_LOCKED_TEST_ALLOWED
, peer_device
);
1865 /* Last part of the attaching process ... */
1866 if (ns
.conn
>= C_CONNECTED
&&
1867 os
.disk
== D_ATTACHING
&& ns
.disk
== D_NEGOTIATING
) {
1868 drbd_send_sizes(peer_device
, 0, 0); /* to start sync... */
1869 drbd_send_uuids(peer_device
);
1870 drbd_send_state(peer_device
, ns
);
1873 /* We want to pause/continue resync, tell peer. */
1874 if (ns
.conn
>= C_CONNECTED
&&
1875 ((os
.aftr_isp
!= ns
.aftr_isp
) ||
1876 (os
.user_isp
!= ns
.user_isp
)))
1877 drbd_send_state(peer_device
, ns
);
1879 /* In case one of the isp bits got set, suspend other devices. */
1880 if ((!os
.aftr_isp
&& !os
.peer_isp
&& !os
.user_isp
) &&
1881 (ns
.aftr_isp
|| ns
.peer_isp
|| ns
.user_isp
))
1882 suspend_other_sg(device
);
1884 /* Make sure the peer gets informed about eventual state
1885 changes (ISP bits) while we were in WFReportParams. */
1886 if (os
.conn
== C_WF_REPORT_PARAMS
&& ns
.conn
>= C_CONNECTED
)
1887 drbd_send_state(peer_device
, ns
);
1889 if (os
.conn
!= C_AHEAD
&& ns
.conn
== C_AHEAD
)
1890 drbd_send_state(peer_device
, ns
);
1892 /* We are in the progress to start a full sync... */
1893 if ((os
.conn
!= C_STARTING_SYNC_T
&& ns
.conn
== C_STARTING_SYNC_T
) ||
1894 (os
.conn
!= C_STARTING_SYNC_S
&& ns
.conn
== C_STARTING_SYNC_S
))
1895 /* no other bitmap changes expected during this phase */
1896 drbd_queue_bitmap_io(device
,
1897 &drbd_bmio_set_n_write
, &abw_start_sync
,
1898 "set_n_write from StartingSync", BM_LOCKED_TEST_ALLOWED
,
1901 /* first half of local IO error, failure to attach,
1902 * or administrative detach */
1903 if (os
.disk
!= D_FAILED
&& ns
.disk
== D_FAILED
) {
1904 enum drbd_io_error_p eh
= EP_PASS_ON
;
1905 int was_io_error
= 0;
1906 /* corresponding get_ldev was in _drbd_set_state, to serialize
1907 * our cleanup here with the transition to D_DISKLESS.
1908 * But is is still not save to dreference ldev here, since
1909 * we might come from an failed Attach before ldev was set. */
1912 eh
= rcu_dereference(device
->ldev
->disk_conf
)->on_io_error
;
1915 was_io_error
= test_and_clear_bit(WAS_IO_ERROR
, &device
->flags
);
1917 /* Intentionally call this handler first, before drbd_send_state().
1918 * See: 2932204 drbd: call local-io-error handler early
1919 * People may chose to hard-reset the box from this handler.
1920 * It is useful if this looks like a "regular node crash". */
1921 if (was_io_error
&& eh
== EP_CALL_HELPER
)
1922 drbd_khelper(device
, "local-io-error");
1924 /* Immediately allow completion of all application IO,
1925 * that waits for completion from the local disk,
1926 * if this was a force-detach due to disk_timeout
1927 * or administrator request (drbdsetup detach --force).
1928 * Do NOT abort otherwise.
1929 * Aborting local requests may cause serious problems,
1930 * if requests are completed to upper layers already,
1931 * and then later the already submitted local bio completes.
1932 * This can cause DMA into former bio pages that meanwhile
1933 * have been re-used for other things.
1934 * So aborting local requests may cause crashes,
1935 * or even worse, silent data corruption.
1937 if (test_and_clear_bit(FORCE_DETACH
, &device
->flags
))
1938 tl_abort_disk_io(device
);
1940 /* current state still has to be D_FAILED,
1941 * there is only one way out: to D_DISKLESS,
1942 * and that may only happen after our put_ldev below. */
1943 if (device
->state
.disk
!= D_FAILED
)
1945 "ASSERT FAILED: disk is %s during detach\n",
1946 drbd_disk_str(device
->state
.disk
));
1948 if (ns
.conn
>= C_CONNECTED
)
1949 drbd_send_state(peer_device
, ns
);
1951 drbd_rs_cancel_all(device
);
1953 /* In case we want to get something to stable storage still,
1954 * this may be the last chance.
1955 * Following put_ldev may transition to D_DISKLESS. */
1956 drbd_md_sync(device
);
1961 /* second half of local IO error, failure to attach,
1962 * or administrative detach,
1963 * after local_cnt references have reached zero again */
1964 if (os
.disk
!= D_DISKLESS
&& ns
.disk
== D_DISKLESS
) {
1965 /* We must still be diskless,
1966 * re-attach has to be serialized with this! */
1967 if (device
->state
.disk
!= D_DISKLESS
)
1969 "ASSERT FAILED: disk is %s while going diskless\n",
1970 drbd_disk_str(device
->state
.disk
));
1972 if (ns
.conn
>= C_CONNECTED
)
1973 drbd_send_state(peer_device
, ns
);
1974 /* corresponding get_ldev in __drbd_set_state
1975 * this may finally trigger drbd_ldev_destroy. */
1979 /* Notify peer that I had a local IO error, and did not detached.. */
1980 if (os
.disk
== D_UP_TO_DATE
&& ns
.disk
== D_INCONSISTENT
&& ns
.conn
>= C_CONNECTED
)
1981 drbd_send_state(peer_device
, ns
);
1983 /* Disks got bigger while they were detached */
1984 if (ns
.disk
> D_NEGOTIATING
&& ns
.pdsk
> D_NEGOTIATING
&&
1985 test_and_clear_bit(RESYNC_AFTER_NEG
, &device
->flags
)) {
1986 if (ns
.conn
== C_CONNECTED
)
1987 resync_after_online_grow(device
);
1990 /* A resync finished or aborted, wake paused devices... */
1991 if ((os
.conn
> C_CONNECTED
&& ns
.conn
<= C_CONNECTED
) ||
1992 (os
.peer_isp
&& !ns
.peer_isp
) ||
1993 (os
.user_isp
&& !ns
.user_isp
))
1994 resume_next_sg(device
);
1996 /* sync target done with resync. Explicitly notify peer, even though
1997 * it should (at least for non-empty resyncs) already know itself. */
1998 if (os
.disk
< D_UP_TO_DATE
&& os
.conn
>= C_SYNC_SOURCE
&& ns
.conn
== C_CONNECTED
)
1999 drbd_send_state(peer_device
, ns
);
2001 /* Verify finished, or reached stop sector. Peer did not know about
2002 * the stop sector, and we may even have changed the stop sector during
2003 * verify to interrupt/stop early. Send the new state. */
2004 if (os
.conn
== C_VERIFY_S
&& ns
.conn
== C_CONNECTED
2005 && verify_can_do_stop_sector(device
))
2006 drbd_send_state(peer_device
, ns
);
2008 /* This triggers bitmap writeout of potentially still unwritten pages
2009 * if the resync finished cleanly, or aborted because of peer disk
2010 * failure, or on transition from resync back to AHEAD/BEHIND.
2012 * Connection loss is handled in drbd_disconnected() by the receiver.
2014 * For resync aborted because of local disk failure, we cannot do
2015 * any bitmap writeout anymore.
2017 * No harm done if some bits change during this phase.
2019 if ((os
.conn
> C_CONNECTED
&& os
.conn
< C_AHEAD
) &&
2020 (ns
.conn
== C_CONNECTED
|| ns
.conn
>= C_AHEAD
) && get_ldev(device
)) {
2021 drbd_queue_bitmap_io(device
, &drbd_bm_write_copy_pages
, NULL
,
2022 "write from resync_finished", BM_LOCKED_CHANGE_ALLOWED
,
2027 if (ns
.disk
== D_DISKLESS
&&
2028 ns
.conn
== C_STANDALONE
&&
2029 ns
.role
== R_SECONDARY
) {
2030 if (os
.aftr_isp
!= ns
.aftr_isp
)
2031 resume_next_sg(device
);
2034 drbd_md_sync(device
);
2037 struct after_conn_state_chg_work
{
2040 union drbd_state ns_min
;
2041 union drbd_state ns_max
; /* new, max state, over all devices */
2042 enum chg_state_flags flags
;
2043 struct drbd_connection
*connection
;
2044 struct drbd_state_change
*state_change
;
/*
 * w_after_conn_state_ch() - after-effects of a connection-wide state change.
 * Runs from the sender work queue with the after_conn_state_chg_work
 * packaged by _conn_request_state().  Broadcasts the change to userspace,
 * starts/stops connection machinery as implied by the transition, and
 * drops the kref taken when the work was queued.
 */
2047 static int w_after_conn_state_ch(struct drbd_work
*w
, int unused
)
2049 struct after_conn_state_chg_work
*acscw
=
2050 container_of(w
, struct after_conn_state_chg_work
, w
);
2051 struct drbd_connection
*connection
= acscw
->connection
;
2052 enum drbd_conns oc
= acscw
->oc
;
2053 union drbd_state ns_max
= acscw
->ns_max
;
2054 struct drbd_peer_device
*peer_device
;
/* Tell userspace (netlink multicast) about the change, then release the
 * state snapshot taken before the transition. */
2057 broadcast_state_change(acscw
->state_change
);
2058 forget_state_change(acscw
->state_change
);
2061 /* Upon network configuration, we need to start the receiver */
2062 if (oc
== C_STANDALONE
&& ns_max
.conn
== C_UNCONNECTED
)
2063 drbd_thread_start(&connection
->receiver
);
/* Tear-down path: we fully disconnected.  Notify userspace that the
 * peer devices and the connection object go away, then detach and free
 * the network configuration. */
2065 if (oc
== C_DISCONNECTING
&& ns_max
.conn
== C_STANDALONE
) {
2066 struct net_conf
*old_conf
;
/* NOTE(review): restored "&notification_mutex" below — the extraction had
 * mis-encoded it as "¬ification_mutex" (HTML-entity folding of "&not"). */
2068 mutex_lock(&notification_mutex
);
2069 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
)
2070 notify_peer_device_state(NULL
, 0, peer_device
, NULL
,
2071 NOTIFY_DESTROY
| NOTIFY_CONTINUES
);
2072 notify_connection_state(NULL
, 0, connection
, NULL
, NOTIFY_DESTROY
);
2073 mutex_unlock(&notification_mutex
);
/* Detach net_conf under conf_update; readers access it via RCU, so the
 * old config may only be freed after a grace period (kvfree_rcu below). */
2075 mutex_lock(&connection
->resource
->conf_update
);
2076 old_conf
= connection
->net_conf
;
2077 connection
->my_addr_len
= 0;
2078 connection
->peer_addr_len
= 0;
2079 RCU_INIT_POINTER(connection
->net_conf
, NULL
);
2080 conn_free_crypto(connection
);
2081 mutex_unlock(&connection
->resource
->conf_update
);
2083 kvfree_rcu_mightsleep(old_conf
);
/* IO was frozen by the fence-peer handler (susp_fen).  If the peer disk
 * can be considered outdated, rotate to new current UUIDs where flagged,
 * restart requests stuck since the connection loss, and lift the
 * suspension — all under req_lock. */
2086 if (ns_max
.susp_fen
) {
2087 /* case1: The outdate peer handler is successful: */
2088 if (ns_max
.pdsk
<= D_OUTDATED
) {
2090 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
2091 struct drbd_device
*device
= peer_device
->device
;
2092 if (test_bit(NEW_CUR_UUID
, &device
->flags
)) {
2093 drbd_uuid_new_current(device
);
2094 clear_bit(NEW_CUR_UUID
, &device
->flags
);
2098 spin_lock_irq(&connection
->resource
->req_lock
);
2099 _tl_restart(connection
, CONNECTION_LOST_WHILE_PENDING
);
2100 _conn_request_state(connection
,
2101 (union drbd_state
) { { .susp_fen
= 1 } },
2102 (union drbd_state
) { { .susp_fen
= 0 } },
2104 spin_unlock_irq(&connection
->resource
->req_lock
);
/* Flush metadata for all volumes, then drop the reference taken by
 * _conn_request_state() when this work item was queued. */
2107 conn_md_sync(connection
);
2108 kref_put(&connection
->kref
, drbd_destroy_connection
);
/*
 * conn_old_common_state() - determine the pre-transition state common to
 * all volumes of @connection.
 * @pcs: out — the common (first volume's) state.
 * @pf:  out — chg_state_flags with every CS_DC_* "detect change" bit
 *       cleared for a field on which the volumes disagree.
 * Starts with all flag bits set (~0) and clears a CS_DC_* bit as soon as
 * one volume's field differs from the accumulated common value.
 */
2113 static void conn_old_common_state(struct drbd_connection
*connection
, union drbd_state
*pcs
, enum chg_state_flags
*pf
)
2115 enum chg_state_flags flags
= ~0;
2116 struct drbd_peer_device
*peer_device
;
/* first_vol: the first volume seeds the common state "cs". */
2117 int vnr
, first_vol
= 1;
/* Default common state if there are no volumes: Secondary with the
 * connection's current cstate. */
2118 union drbd_dev_state os
, cs
= {
2119 { .role
= R_SECONDARY
,
2121 .conn
= connection
->cstate
,
2127 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
2128 struct drbd_device
*device
= peer_device
->device
;
/* A field differing between volumes is not a "common" change: clear
 * the corresponding detect-change flag. */
2137 if (cs
.role
!= os
.role
)
2138 flags
&= ~CS_DC_ROLE
;
2140 if (cs
.peer
!= os
.peer
)
2141 flags
&= ~CS_DC_PEER
;
2143 if (cs
.conn
!= os
.conn
)
2144 flags
&= ~CS_DC_CONN
;
2146 if (cs
.disk
!= os
.disk
)
2147 flags
&= ~CS_DC_DISK
;
2149 if (cs
.pdsk
!= os
.pdsk
)
2150 flags
&= ~CS_DC_PDSK
;
/*
 * conn_is_valid_transition() - check, volume by volume, whether applying
 * @mask/@val to @connection yields a valid state transition.
 * Returns SS_SUCCESS, or the first per-volume error encountered.
 * With CS_HARD set, only the hard transition rules are checked; the
 * "soft" policy checks (is_valid_state / is_valid_soft_transition) are
 * skipped.
 */
2159 static enum drbd_state_rv
2160 conn_is_valid_transition(struct drbd_connection
*connection
, union drbd_state mask
, union drbd_state val
,
2161 enum chg_state_flags flags
)
2163 enum drbd_state_rv rv
= SS_SUCCESS
;
2164 union drbd_state ns
, os
;
2165 struct drbd_peer_device
*peer_device
;
2169 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
2170 struct drbd_device
*device
= peer_device
->device
;
/* Compute the sanitized candidate state for this volume. */
2171 os
= drbd_read_state(device
);
2172 ns
= sanitize_state(device
, os
, apply_mask_val(os
, mask
, val
), NULL
);
/* Caller may ask to tolerate a failed outdate attempt
 * (disk would newly become D_OUTDATED). */
2174 if (flags
& CS_IGN_OUTD_FAIL
&& ns
.disk
== D_OUTDATED
&& os
.disk
< D_OUTDATED
)
2180 rv
= is_valid_transition(os
, ns
);
/* Soft (policy) checks, unless CS_HARD forces the change through. */
2182 if (rv
>= SS_SUCCESS
&& !(flags
& CS_HARD
)) {
2183 rv
= is_valid_state(device
, ns
);
2184 if (rv
< SS_SUCCESS
) {
/* If the old state was invalid for the same reason, judge only
 * the transition itself rather than the (already bad) state. */
2185 if (is_valid_state(device
, os
) == rv
)
2186 rv
= is_valid_soft_transition(os
, ns
, connection
);
2188 rv
= is_valid_soft_transition(os
, ns
, connection
);
2191 if (rv
< SS_SUCCESS
) {
2192 if (flags
& CS_VERBOSE
)
2193 print_st_err(device
, os
, ns
, rv
);
/*
 * conn_set_state() - apply @mask/@val to every volume of @connection and
 * report the elementwise minimum (*pns_min) and maximum (*pns_max) of the
 * resulting per-volume states.  Validity must have been checked by the
 * caller (conn_is_valid_transition()); an invalid change here is a bug.
 */
2203 conn_set_state(struct drbd_connection
*connection
, union drbd_state mask
, union drbd_state val
,
2204 union drbd_state
*pns_min
, union drbd_state
*pns_max
, enum chg_state_flags flags
)
2206 union drbd_state ns
, os
, ns_max
= { };
2207 union drbd_state ns_min
= {
2214 struct drbd_peer_device
*peer_device
;
2215 enum drbd_state_rv rv
;
2216 int vnr
, number_of_volumes
= 0;
/* The connection-level cstate changes only when the full conn field is
 * being set (mask.conn == C_MASK). */
2218 if (mask
.conn
== C_MASK
) {
2219 /* remember last connect time so request_timer_fn() won't
2220 * kill newly established sessions while we are still trying to thaw
2221 * previously frozen IO */
2222 if (connection
->cstate
!= C_WF_REPORT_PARAMS
&& val
.conn
== C_WF_REPORT_PARAMS
)
2223 connection
->last_reconnect_jif
= jiffies
;
2225 connection
->cstate
= val
.conn
;
/* Apply the change to each volume and fold the result into the
 * per-field min/max aggregates. */
2229 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
) {
2230 struct drbd_device
*device
= peer_device
->device
;
2231 number_of_volumes
++;
2232 os
= drbd_read_state(device
);
2233 ns
= apply_mask_val(os
, mask
, val
);
2234 ns
= sanitize_state(device
, os
, ns
, NULL
);
/* As in conn_is_valid_transition(): optionally tolerate a failed outdate. */
2236 if (flags
& CS_IGN_OUTD_FAIL
&& ns
.disk
== D_OUTDATED
&& os
.disk
< D_OUTDATED
)
2239 rv
= _drbd_set_state(device
, ns
, flags
, NULL
);
/* Validity was established before; failure here is a logic error. */
2240 BUG_ON(rv
< SS_SUCCESS
);
/* Re-read the actually-applied state (sanitize may have adjusted it). */
2241 ns
.i
= device
->state
.i
;
2242 ns_max
.role
= max_role(ns
.role
, ns_max
.role
);
2243 ns_max
.peer
= max_role(ns
.peer
, ns_max
.peer
);
2244 ns_max
.conn
= max_t(enum drbd_conns
, ns
.conn
, ns_max
.conn
);
2245 ns_max
.disk
= max_t(enum drbd_disk_state
, ns
.disk
, ns_max
.disk
);
2246 ns_max
.pdsk
= max_t(enum drbd_disk_state
, ns
.pdsk
, ns_max
.pdsk
);
2248 ns_min
.role
= min_role(ns
.role
, ns_min
.role
);
2249 ns_min
.peer
= min_role(ns
.peer
, ns_min
.peer
);
2250 ns_min
.conn
= min_t(enum drbd_conns
, ns
.conn
, ns_min
.conn
);
2251 ns_min
.disk
= min_t(enum drbd_disk_state
, ns
.disk
, ns_min
.disk
);
2252 ns_min
.pdsk
= min_t(enum drbd_disk_state
, ns
.pdsk
, ns_min
.pdsk
);
/* No volumes at all: report a neutral Secondary/standalone state. */
2256 if (number_of_volumes
== 0) {
2257 ns_min
= ns_max
= (union drbd_state
) { {
2258 .role
= R_SECONDARY
,
/* The suspend flags live on the resource, so they are identical for
 * min and max. */
2266 ns_min
.susp
= ns_max
.susp
= connection
->resource
->susp
;
2267 ns_min
.susp_nod
= ns_max
.susp_nod
= connection
->resource
->susp_nod
;
2268 ns_min
.susp_fen
= ns_max
.susp_fen
= connection
->resource
->susp_fen
;
/*
 * _conn_rq_cond() - wait_event condition for a cluster-wide (peer-
 * negotiated) state change.  Returns SS_UNKNOWN_ERROR to keep waiting,
 * the peer's verdict once a reply arrived (OKAY/FAIL bits set by the
 * receiver), or aborts the wait if the transition became invalid locally
 * in the meantime.  Called with req_lock held (via wait_event_lock_irq).
 */
2274 static enum drbd_state_rv
2275 _conn_rq_cond(struct drbd_connection
*connection
, union drbd_state mask
, union drbd_state val
)
2277 enum drbd_state_rv err
, rv
= SS_UNKNOWN_ERROR
; /* continue waiting */;
/* Peer acknowledged the request. */
2279 if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY
, &connection
->flags
))
/* Peer rejected the request. */
2282 if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL
, &connection
->flags
))
2283 rv
= SS_CW_FAILED_BY_PEER
;
/* Re-validate locally while waiting; only keep waiting while the change
 * is still valid and we are still in C_WF_REPORT_PARAMS. */
2285 err
= conn_is_valid_transition(connection
, mask
, val
, 0);
2286 if (err
== SS_SUCCESS
&& connection
->cstate
== C_WF_REPORT_PARAMS
)
/*
 * _conn_request_state() - connection-wide state change.
 * Caller holds resource->req_lock.  Validates the transition, negotiates
 * a disconnect from C_WF_REPORT_PARAMS with the peer (cluster-wide case,
 * which temporarily drops the spinlock and takes cstate_mutex), applies
 * the change to all volumes via conn_set_state(), and queues the
 * after-effects (w_after_conn_state_ch) on the sender work queue.
 */
2293 _conn_request_state(struct drbd_connection
*connection
, union drbd_state mask
, union drbd_state val
,
2294 enum chg_state_flags flags
)
2296 enum drbd_state_rv rv
= SS_SUCCESS
;
2297 struct after_conn_state_chg_work
*acscw
;
2298 enum drbd_conns oc
= connection
->cstate
;
2299 union drbd_state ns_max
, ns_min
, os
;
/* Tracks whether cstate_mutex is held, for the unlock path. */
2300 bool have_mutex
= false;
2301 struct drbd_state_change
*state_change
;
/* Validate the connection-level transition, then each volume. */
2304 rv
= is_valid_conn_transition(oc
, val
.conn
);
2305 if (rv
< SS_SUCCESS
)
2309 rv
= conn_is_valid_transition(connection
, mask
, val
, flags
);
2310 if (rv
< SS_SUCCESS
)
/* Graceful disconnect while connected: must be agreed with the peer. */
2313 if (oc
== C_WF_REPORT_PARAMS
&& val
.conn
== C_DISCONNECTING
&&
2314 !(flags
& (CS_LOCAL_ONLY
| CS_HARD
))) {
2316 /* This will be a cluster-wide state change.
2317 * Need to give up the spinlock, grab the mutex,
2318 * then send the state change request, ... */
2319 spin_unlock_irq(&connection
->resource
->req_lock
);
2320 mutex_lock(&connection
->cstate_mutex
);
2323 set_bit(CONN_WD_ST_CHG_REQ
, &connection
->flags
);
2324 if (conn_send_state_req(connection
, mask
, val
)) {
2325 /* sending failed. */
2326 clear_bit(CONN_WD_ST_CHG_REQ
, &connection
->flags
);
2327 rv
= SS_CW_FAILED_BY_PEER
;
2328 /* need to re-aquire the spin lock, though */
2329 goto abort_unlocked
;
2332 if (val
.conn
== C_DISCONNECTING
)
2333 set_bit(DISCONNECT_SENT
, &connection
->flags
);
2335 /* ... and re-aquire the spinlock.
2336 * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
2337 * conn_set_state() within the same spinlock. */
2338 spin_lock_irq(&connection
->resource
->req_lock
);
/* Sleep (dropping req_lock around schedule) until the peer replied or
 * the transition became invalid — see _conn_rq_cond(). */
2339 wait_event_lock_irq(connection
->ping_wait
,
2340 (rv
= _conn_rq_cond(connection
, mask
, val
)),
2341 connection
->resource
->req_lock
);
2342 clear_bit(CONN_WD_ST_CHG_REQ
, &connection
->flags
);
2343 if (rv
< SS_SUCCESS
)
/* Snapshot old state, apply the change to all volumes, snapshot new. */
2347 state_change
= remember_old_state(connection
->resource
, GFP_ATOMIC
);
2348 conn_old_common_state(connection
, &os
, &flags
);
2349 flags
|= CS_DC_SUSP
;
2350 conn_set_state(connection
, mask
, val
, &ns_min
, &ns_max
, flags
);
2351 conn_pr_state_change(connection
, os
, ns_max
, flags
);
2352 remember_new_state(state_change
);
/* Queue the after-effects; GFP_ATOMIC because req_lock is held. */
2354 acscw
= kmalloc(sizeof(*acscw
), GFP_ATOMIC
);
2356 acscw
->oc
= os
.conn
;
2357 acscw
->ns_min
= ns_min
;
2358 acscw
->ns_max
= ns_max
;
2359 acscw
->flags
= flags
;
2360 acscw
->w
.cb
= w_after_conn_state_ch
;
/* Reference dropped by w_after_conn_state_ch() (kref_put). */
2361 kref_get(&connection
->kref
);
2362 acscw
->connection
= connection
;
2363 acscw
->state_change
= state_change
;
2364 drbd_queue_work(&connection
->sender_work
, &acscw
->w
);
/* Allocation failure: after-effects are skipped, state is still set. */
2366 drbd_err(connection
, "Could not kmalloc an acscw\n");
2371 /* mutex_unlock() "... must not be used in interrupt context.",
2372 * so give up the spinlock, then re-aquire it */
2373 spin_unlock_irq(&connection
->resource
->req_lock
);
2375 mutex_unlock(&connection
->cstate_mutex
);
2376 spin_lock_irq(&connection
->resource
->req_lock
);
/* Verbose failure reporting. */
2378 if (rv
< SS_SUCCESS
&& flags
& CS_VERBOSE
) {
2379 drbd_err(connection
, "State change failed: %s\n", drbd_set_st_err_str(rv
));
2380 drbd_err(connection
, " mask = 0x%x val = 0x%x\n", mask
.i
, val
.i
);
2381 drbd_err(connection
, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc
), drbd_conn_str(val
.conn
));
2387 conn_request_state(struct drbd_connection
*connection
, union drbd_state mask
, union drbd_state val
,
2388 enum chg_state_flags flags
)
2390 enum drbd_state_rv rv
;
2392 spin_lock_irq(&connection
->resource
->req_lock
);
2393 rv
= _conn_request_state(connection
, mask
, val
, flags
);
2394 spin_unlock_irq(&connection
->resource
->req_lock
);