// SPDX-License-Identifier: GPL-2.0-or-later
/*
   drbd_nl.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
/* .doit */
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);
/* .dumpit */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include "drbd_nla.h"
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used by blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}
__printf(2, 3)
static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
{
	va_list args;
	struct nlattr *nla, *txt;
	int err = -EMSGSIZE;
	int len;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	txt = nla_reserve(skb, T_info_text, 256);
	if (!txt) {
		nla_nest_cancel(skb, nla);
		return err;
	}

	va_start(args, fmt);
	len = vscnprintf(nla_data(txt), 256, fmt, args);
	va_end(args);

	/* maybe: retry with larger reserve, if truncated */
	txt->nla_len = nla_attr_size(len + 1);
	nlmsg_trim(skb, (char *)txt + NLA_ALIGN(txt->nla_len));
	nla_nest_end(skb, nla);

	return 0;
}
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name) {
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
	}

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							nla_len(adm_ctx->my_addr),
							nla_data(adm_ctx->peer_addr),
							nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
			adm_ctx->minor, adm_ctx->resource->name,
			adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			adm_ctx->minor, adm_ctx->volume,
			adm_ctx->device->vnr, adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}
static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
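/*
 * Illustrative example (added for orientation, not part of the original
 * source): for an IPv4 peer at 10.0.0.2, the buffers filled in above end up
 * holding roughly
 *   envp[3] = "DRBD_PEER_AF=ipv4"
 *   envp[4] = "DRBD_PEER_ADDRESS=10.0.0.2"
 * so user-space helpers such as a fence-peer script can read the address
 * family and peer address straight out of their environment.
 */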
int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[14];
	char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				drbd_usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				drbd_usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	return fp;
}
static bool resource_is_supended(struct drbd_resource *resource)
{
	return resource->susp || resource->susp_fen || resource->susp_nod;
}
bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_supended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
		}
		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator. */
		spin_unlock_irq(&resource->req_lock);
		return false;

	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
			 * This is useful when an unconnected R_SECONDARY is asked to
			 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case P_FENCING:
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection in the
	   meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}
void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may have just sent a signal to this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}
static enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible.  */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			if (try < max_tries) {
				int timeo;
				try = max_tries - 1;
				rcu_read_lock();
				nc = rcu_dereference(connection->net_conf);
				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
				rcu_read_unlock();
				schedule_timeout_interruptible(timeo);
			}
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			       device->state.pdsk <= D_FAILED)
			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |= (u64)1;
			put_ldev(device);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	genl_unlock();
	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
	genl_lock();
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  Activity log size used to be fixed 32kB,
 *  but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
		break;
	}
}
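/*
 * Worked example (illustrative only, not from the original source): for
 * internal meta data on a 1 TiB backing device with the default 32 kB
 * activity log (al_size_4k = 8, so al_size_sect = 64), one bit per 4 KiB of
 * data means roughly 65536 sectors (32 MiB) of bitmap; md_size_sect then
 * comes out at about 65536 + 8 + 64 sectors, al_offset is -64, and
 * bm_offset is -md_size_sect + 8, both relative to the superblock at the
 * end of the device.
 */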
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
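/*
 * Example (illustrative, not part of the original source): for a size of
 * 1048576 KB the loop above shifts once (1048576 >> 10 == 1024, no rounding
 * carry), so the buffer is formatted as "1024 MB"; 2000 KB stays below the
 * 10000 threshold and is printed as "2000 KB".
 */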
/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
   drbd_adm_suspend_io/drbd_adm_resume_io,
   which are (sub) state changes triggered by admin (drbdsetup),
   and can be long lived.
   This changes a device->flag, is triggered by drbd internals,
   and should be short-lived. */
/* It needs to be a counter, since multiple threads might
   independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
{
	atomic_inc(&device->suspend_cnt);
	if (drbd_suspended(device))
		return;
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->suspend_cnt))
		wake_up(&device->misc_wait);
}
/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	struct md_offsets_and_sizes {
		u64 last_agreed_sect;
		u64 md_offset;
		s32 al_offset;
		s32 bm_offset;
		u32 md_size_sect;

		u32 al_stripes;
		u32 al_stripe_size_4k;
	} prev;
	sector_t u_size, size;
	struct drbd_md *md = &device->ldev->md;
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* We may change the on-disk offsets of our meta data below. Lock out
	 * anything that may cause meta data IO, to avoid acting on incomplete
	 * layout changes or scribbling over meta data that is in the process
	 * of being moved.
	 *
	 * Move is not exactly correct, btw, currently we have all our meta
	 * data in core memory, to "move" it we just write it all out, there
	 * are no reads. */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(device);
		return DS_ERROR;
	}

	/* remember current offset and sizes */
	prev.last_agreed_sect = md->la_size_sect;
	prev.md_offset = md->md_offset;
	prev.al_offset = md->al_offset;
	prev.bm_offset = md->bm_offset;
	prev.md_size_sect = md->md_size_sect;
	prev.al_stripes = md->al_stripes;
	prev.al_stripe_size_4k = md->al_stripe_size_4k;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */
		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(device, device->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < prev.last_agreed_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (drbd_get_capacity(device->this_bdev) != size ||
	    drbd_bm_capacity(device) != size) {
		int err;
		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device);
			if (size == 0) {
				drbd_err(device, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				drbd_err(device, "BM resizing failed. "
				    "Leaving size unchanged\n");
			}
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		md->la_size_sect = size;
	}
	if (rv <= DS_ERROR)
		goto err_out;

	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);

	md_moved = prev.md_offset    != md->md_offset
		|| prev.md_size_sect != md->md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyways. */
		del_timer(&device->md_sync_timer);

		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Lock for transaction is good enough, it may well be "dirty"
		 * or even "starving". */
		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));

		/* mark current on-disk bitmap and activity log as unreliable */
		prev_flags = md->flags;
		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
		drbd_md_write(device, buffer);

		drbd_al_initialize(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK);

		/* on-disk bitmap and activity log is authoritative again
		 * (unless there was an IO error meanwhile...) */
		md->flags = prev_flags;
		drbd_md_write(device, buffer);

		if (rs)
			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				  md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > prev.last_agreed_sect)
		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < prev.last_agreed_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous offset and sizes */
		md->la_size_sect = prev.last_agreed_sect;
		md->md_offset = prev.md_offset;
		md->al_offset = prev.al_offset;
		md->bm_offset = prev.bm_offset;
		md->md_size_sect = prev.md_size_sect;
		md->al_stripes = prev.al_stripes;
		md->al_stripe_size_4k = prev.al_stripe_size_4k;
		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
	}
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		drbd_err(device, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		drbd_err(device, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&device->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				drbd_err(device, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
	if (in_use) {
		drbd_err(device, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		lc_destroy(t);
	}
	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
	return 0;
}
static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
{
	q->limits.discard_granularity = granularity;
}
static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
{
	/* when we introduced REQ_WRITE_SAME support, we also bumped
	 * our maximum supported batch bio size used for discards. */
	if (connection->agreed_features & DRBD_FF_WSAME)
		return DRBD_MAX_BBIO_SECTORS;
	/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
	return AL_EXTENT_SIZE >> 9;
}
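/*
 * For orientation (illustrative, assuming the usual 4 MiB AL extent): the
 * legacy limit above works out to (4 << 20) >> 9 = 8192 sectors, i.e. one
 * 4 MiB discard per request, while DRBD_MAX_BBIO_SECTORS allows much larger
 * batches on feature-capable peers.
 */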
static void decide_on_discard_support(struct drbd_device *device,
			struct request_queue *q,
			struct request_queue *b,
			bool discard_zeroes_if_aligned)
{
	/* q = drbd device queue (device->rq_queue)
	 * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
	 *     or NULL if diskless
	 */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	bool can_do = b ? blk_queue_discard(b) : true;

	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
		can_do = false;
		drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");
	}
	if (can_do) {
		/* We don't care for the granularity, really.
		 * Stacking limits below should fix it for the local
		 * device. Whether or not it is a suitable granularity
		 * on the remote device is not our problem, really. If
		 * you care, you need to use devices with similar
		 * topology on all peers. */
		blk_queue_discard_granularity(q, 512);
		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
	} else {
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		blk_queue_discard_granularity(q, 0);
		q->limits.max_discard_sectors = 0;
		q->limits.max_write_zeroes_sectors = 0;
	}
}
static void fixup_discard_if_not_supported(struct request_queue *q)
{
	/* To avoid confusion, if this queue does not support discard, clear
	 * max_discard_sectors, which is what lsblk -D reports to the user.
	 * Older kernels got this wrong in "stack limits".
	 * */
	if (!blk_queue_discard(q)) {
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_discard_granularity(q, 0);
	}
}
static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
{
	/* Fixup max_write_zeroes_sectors after blk_queue_stack_limits():
	 * if we can handle "zeroes" efficiently on the protocol,
	 * we want to do that, even if our backend does not announce
	 * max_write_zeroes_sectors itself. */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	/* If the peer announces WZEROES support, use it.  Otherwise, rather
	 * send explicit zeroes than rely on some discard-zeroes-data magic. */
	if (connection->agreed_features & DRBD_FF_WZEROES)
		q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
	else
		q->limits.max_write_zeroes_sectors = 0;
}
static void decide_on_write_same_support(struct drbd_device *device,
			struct request_queue *q,
			struct request_queue *b, struct o_qlim *o,
			bool disable_write_same)
{
	struct drbd_peer_device *peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device->connection;
	bool can_do = b ? b->limits.max_write_same_sectors : true;

	if (can_do && disable_write_same) {
		can_do = false;
		drbd_info(peer_device, "WRITE_SAME disabled by config\n");
	}

	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
		can_do = false;
		drbd_info(peer_device, "peer does not support WRITE_SAME\n");
	}

	if (o) {
		/* logical block size; queue_logical_block_size(NULL) is 512 */
		unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
		unsigned int me_lbs_b = queue_logical_block_size(b);
		unsigned int me_lbs = queue_logical_block_size(q);

		if (me_lbs_b != me_lbs) {
			drbd_warn(peer_device,
				"logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
				me_lbs, me_lbs_b);
			/* rather disable write same than trigger some BUG_ON later in the scsi layer. */
			can_do = false;
		}
		if (me_lbs_b != peer_lbs) {
			drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
				me_lbs, peer_lbs);
			if (can_do) {
				drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
				can_do = false;
			}
			me_lbs = max(me_lbs, me_lbs_b);
			/* We cannot change the logical block size of an in-use queue.
			 * We can only hope that access happens to be properly aligned.
			 * If not, the peer will likely produce an IO error, and detach. */
			if (peer_lbs > me_lbs) {
				if (device->state.role != R_PRIMARY) {
					blk_queue_logical_block_size(q, peer_lbs);
					drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
				} else {
					drbd_warn(peer_device,
						"current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
						me_lbs, peer_lbs);
				}
			}
		}
		if (can_do && !o->write_same_capable) {
			/* If we introduce an open-coded write-same loop on the receiving side,
			 * the peer would present itself as "capable". */
			drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");
			can_do = false;
		}
	}

	blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
}
static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
				   unsigned int max_bio_size, struct o_qlim *o)
{
	struct request_queue * const q = device->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;
	struct request_queue *b = NULL;
	struct disk_conf *dc;
	bool discard_zeroes_if_aligned = true;
	bool disable_write_same = false;

	if (bdev) {
		b = bdev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		dc = rcu_dereference(device->ldev->disk_conf);
		max_segments = dc->max_bio_bvecs;
		discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
		disable_write_same = dc->disable_write_same;
		rcu_read_unlock();

		blk_set_stacking_limits(&q->limits);
	}

	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
	decide_on_write_same_support(device, q, b, o, disable_write_same);

	if (b) {
		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info->ra_pages !=
		    b->backing_dev_info->ra_pages) {
			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info->ra_pages,
				 b->backing_dev_info->ra_pages);
			q->backing_dev_info->ra_pages =
						b->backing_dev_info->ra_pages;
		}
	}
	fixup_discard_if_not_supported(q);
	fixup_write_zeroes(device, q);
}
void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(device->rq_queue) << 9;
	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (bdev) {
		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
		device->local_max_bio_size = local;
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough.
	   Because new from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		if (first_peer_device(device)->connection->agreed_pro_version < 94)
			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
		/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (first_peer_device(device)->connection->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (first_peer_device(device)->connection->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;

		/* We may later detach and re-attach on a disconnected Primary.
		 * Avoid this setting to jump back in that case.
		 * We want to store what we know the peer DRBD can handle,
		 * not what the peer IO backend can handle. */
		if (peer > device->peer_max_bio_size)
			device->peer_max_bio_size = peer;
	}
	new = min(local, peer);

	if (device->state.role == R_PRIMARY && new < now)
		drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		drbd_info(device, "max BIO size = %u\n", new);

	drbd_setup_queue_param(device, bdev, new, o);
}
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);
}
/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
	bool stop_threads;

	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);
	if (stop_threads) {
		/* ack_receiver thread and ack_sender workqueue are implicitly
		 * stopped by receiver in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
	}
}
/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_device *device)
{
	int s = 0;

	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

	if (s)
		drbd_info(device, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr *)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}
static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 context slots per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
		/AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
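/*
 * Worked example (illustrative only): with 919 context slots per
 * transaction and the default 32 kB activity log, al_size_4k is 8, so the
 * cap above yields (8 - 1) * 919 = 6433 usable AL extents; the
 * DRBD_AL_EXTENTS_MAX ceiling only matters once the on-disk ring buffer has
 * roughly 72 or more 4 kB transaction blocks.
 */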
static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
	return	a->disk_barrier != b->disk_barrier ||
		a->disk_flushes != b->disk_flushes ||
		a->disk_drain != b->disk_drain;
}
static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
			       struct drbd_backing_dev *nbc)
{
	struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;

	if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
		disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (!blk_queue_discard(q)) {
		if (disk_conf->rs_discard_granularity) {
			disk_conf->rs_discard_granularity = 0; /* disable feature */
			drbd_info(device, "rs_discard_granularity feature disabled\n");
		}
	}

	if (disk_conf->rs_discard_granularity) {
		int orig_value = disk_conf->rs_discard_granularity;
		int remainder;

		if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
			disk_conf->rs_discard_granularity = q->limits.discard_granularity;

		remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
		disk_conf->rs_discard_granularity += remainder;

		if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
			disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;

		if (disk_conf->rs_discard_granularity != orig_value)
			drbd_info(device, "rs_discard_granularity changed to %d\n",
				  disk_conf->rs_discard_granularity);
	}
}
static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	int err = -EBUSY;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	drbd_suspend_io(device);
	/* If IO completion is currently blocked, we would likely wait
	 * "forever" for the activity log to become unused. So we don't. */
	if (atomic_read(&device->ap_bio_cnt))
		goto out;

	wait_event(device->al_wait, lc_try_lock(device->act_log));
	drbd_al_shrink(device);
	err = drbd_check_al_size(device, dc);
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
out:
	drbd_resume_io(device);
	return err;
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct drbd_device *device;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail_unlock;
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	sanitize_disk_conf(device, new_disk_conf, device->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != device->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			drbd_err(device, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	err = disk_opts_check_al_size(device, new_disk_conf);
	if (err) {
		/* Could be just "busy". Ignore?
		 * Introduce dedicated error code? */
		drbd_msg_put_info(adm_ctx.reply_skb,
			"Try again without changing current al-extents setting");
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(device);
	}
	unlock_all_resources();

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = device->rs_plan_s;
		rcu_assign_pointer(device->rs_plan_s, new_plan);
	}

	mutex_unlock(&device->resource->conf_update);

	if (new_disk_conf->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	if (write_ordering_changed(old_disk_conf, new_disk_conf))
		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);

	if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned
	||  old_disk_conf->disable_write_same != new_disk_conf->disable_write_same)
		drbd_reconsider_queue_parameters(device, device->ldev, NULL);

	drbd_md_sync(device);

	if (device->state.conn >= C_CONNECTED) {
		struct drbd_peer_device *peer_device;

		for_each_peer_device(peer_device, device)
			drbd_send_sync_param(peer_device);
	}

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&device->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&device->resource->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(device);
 out:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static struct block_device *open_backing_dev(struct drbd_device *device,
		const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
	struct block_device *bdev;
	int err = 0;

	bdev = blkdev_get_by_path(bdev_path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
	if (IS_ERR(bdev)) {
		drbd_err(device, "open(\"%s\") failed with %ld\n",
				bdev_path, PTR_ERR(bdev));
		return bdev;
	}

	if (!do_bd_link)
		return bdev;

	err = bd_link_disk_holder(bdev, device->vdisk);
	if (err) {
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
				bdev_path, err);
		bdev = ERR_PTR(err);
	}
	return bdev;
}
static int open_backing_devices(struct drbd_device *device,
		struct disk_conf *new_disk_conf,
		struct drbd_backing_dev *nbc)
{
	struct block_device *bdev;

	bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
	if (IS_ERR(bdev))
		return ERR_OPEN_DISK;
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = open_backing_dev(device, new_disk_conf->meta_dev,
		/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
		 * if potentially shared with other drbd minors */
			(new_disk_conf->meta_dev_idx < 0) ? (void *)device : (void *)drbd_m_holder,
		/* avoid double bd_claim_by_disk() for the same (source,target) tuple,
		 * as would happen with internal metadata. */
			(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
			 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
	if (IS_ERR(bdev))
		return ERR_OPEN_MD_DISK;
	nbc->md_bdev = bdev;
	return NO_ERROR;
}
static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
	bool do_bd_unlink)
{
	if (!bdev)
		return;
	if (do_bd_unlink)
		bd_unlink_disk_holder(bdev, device->vdisk);
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
	close_backing_dev(device, ldev->backing_bdev, true);

	kfree(ldev->disk_conf);
	kfree(ldev);
}
1789 int drbd_adm_attach(struct sk_buff
*skb
, struct genl_info
*info
)
1791 struct drbd_config_context adm_ctx
;
1792 struct drbd_device
*device
;
1793 struct drbd_peer_device
*peer_device
;
1794 struct drbd_connection
*connection
;
1796 enum drbd_ret_code retcode
;
1797 enum determine_dev_size dd
;
1798 sector_t max_possible_sectors
;
1799 sector_t min_md_device_sectors
;
1800 struct drbd_backing_dev
*nbc
= NULL
; /* new_backing_conf */
1801 struct disk_conf
*new_disk_conf
= NULL
;
1802 struct lru_cache
*resync_lru
= NULL
;
1803 struct fifo_buffer
*new_plan
= NULL
;
1804 union drbd_state ns
, os
;
1805 enum drbd_state_rv rv
;
1806 struct net_conf
*nc
;
1808 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
1809 if (!adm_ctx
.reply_skb
)
1811 if (retcode
!= NO_ERROR
)
1814 device
= adm_ctx
.device
;
1815 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
1816 peer_device
= first_peer_device(device
);
1817 connection
= peer_device
->connection
;
1818 conn_reconfig_start(connection
);
1820 /* if you want to reconfigure, please tear down first */
1821 if (device
->state
.disk
> D_DISKLESS
) {
1822 retcode
= ERR_DISK_CONFIGURED
;
1825 /* It may just now have detached because of IO error. Make sure
1826 * drbd_ldev_destroy is done already, we may end up here very fast,
1827 * e.g. if someone calls attach from the on-io-error handler,
1828 * to realize a "hot spare" feature (not that I'd recommend that) */
1829 wait_event(device
->misc_wait
, !test_bit(GOING_DISKLESS
, &device
->flags
));
1831 /* make sure there is no leftover from previous force-detach attempts */
1832 clear_bit(FORCE_DETACH
, &device
->flags
);
1833 clear_bit(WAS_IO_ERROR
, &device
->flags
);
1834 clear_bit(WAS_READ_ERROR
, &device
->flags
);
1836 /* and no leftover from previously aborted resync or verify, either */
1837 device
->rs_total
= 0;
1838 device
->rs_failed
= 0;
1839 atomic_set(&device
->rs_pending_cnt
, 0);
1841 /* allocation not in the IO path, drbdsetup context */
1842 nbc
= kzalloc(sizeof(struct drbd_backing_dev
), GFP_KERNEL
);
1844 retcode
= ERR_NOMEM
;
1847 spin_lock_init(&nbc
->md
.uuid_lock
);
1849 new_disk_conf
= kzalloc(sizeof(struct disk_conf
), GFP_KERNEL
);
1850 if (!new_disk_conf
) {
1851 retcode
= ERR_NOMEM
;
1854 nbc
->disk_conf
= new_disk_conf
;
1856 set_disk_conf_defaults(new_disk_conf
);
1857 err
= disk_conf_from_attrs(new_disk_conf
, info
);
1859 retcode
= ERR_MANDATORY_TAG
;
1860 drbd_msg_put_info(adm_ctx
.reply_skb
, from_attrs_err_to_txt(err
));
1864 if (new_disk_conf
->c_plan_ahead
> DRBD_C_PLAN_AHEAD_MAX
)
1865 new_disk_conf
->c_plan_ahead
= DRBD_C_PLAN_AHEAD_MAX
;
1867 new_plan
= fifo_alloc((new_disk_conf
->c_plan_ahead
* 10 * SLEEP_TIME
) / HZ
);
1869 retcode
= ERR_NOMEM
;
1873 if (new_disk_conf
->meta_dev_idx
< DRBD_MD_INDEX_FLEX_INT
) {
1874 retcode
= ERR_MD_IDX_INVALID
;
1879 nc
= rcu_dereference(connection
->net_conf
);
1881 if (new_disk_conf
->fencing
== FP_STONITH
&& nc
->wire_protocol
== DRBD_PROT_A
) {
1883 retcode
= ERR_STONITH_AND_PROT_A
;
1889 retcode
= open_backing_devices(device
, new_disk_conf
, nbc
);
1890 if (retcode
!= NO_ERROR
)
1893 if ((nbc
->backing_bdev
== nbc
->md_bdev
) !=
1894 (new_disk_conf
->meta_dev_idx
== DRBD_MD_INDEX_INTERNAL
||
1895 new_disk_conf
->meta_dev_idx
== DRBD_MD_INDEX_FLEX_INT
)) {
1896 retcode
= ERR_MD_IDX_INVALID
;
1900 resync_lru
= lc_create("resync", drbd_bm_ext_cache
,
1901 1, 61, sizeof(struct bm_extent
),
1902 offsetof(struct bm_extent
, lce
));
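	/* A small LRU of resync extents: at most 61 extents are "hot" at any
	 * one time, which bounds the amount of resync state kept per device. */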
1904 retcode
= ERR_NOMEM
;
1908 /* Read our meta data super block early.
1909 * This also sets other on-disk offsets. */
1910 retcode
= drbd_md_read(device
, nbc
);
1911 if (retcode
!= NO_ERROR
)
1914 sanitize_disk_conf(device
, new_disk_conf
, nbc
);
1916 if (drbd_get_max_capacity(nbc
) < new_disk_conf
->disk_size
) {
1917 drbd_err(device
, "max capacity %llu smaller than disk size %llu\n",
1918 (unsigned long long) drbd_get_max_capacity(nbc
),
1919 (unsigned long long) new_disk_conf
->disk_size
);
1920 retcode
= ERR_DISK_TOO_SMALL
;
1924 if (new_disk_conf
->meta_dev_idx
< 0) {
1925 max_possible_sectors
= DRBD_MAX_SECTORS_FLEX
;
1926 /* at least one MB, otherwise it does not make sense */
1927 min_md_device_sectors
= (2<<10);
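		/* 2<<10 = 2048 sectors of 512 bytes, i.e. exactly 1 MiB. */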
1929 max_possible_sectors
= DRBD_MAX_SECTORS
;
1930 min_md_device_sectors
= MD_128MB_SECT
* (new_disk_conf
->meta_dev_idx
+ 1);
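		/* Indexed external meta data: every index occupies a fixed 128 MiB
		 * slot, so the meta device must hold meta_dev_idx + 1 such slots. */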
1933 if (drbd_get_capacity(nbc
->md_bdev
) < min_md_device_sectors
) {
1934 retcode
= ERR_MD_DISK_TOO_SMALL
;
1935 drbd_warn(device
, "refusing attach: md-device too small, "
1936 "at least %llu sectors needed for this meta-disk type\n",
1937 (unsigned long long) min_md_device_sectors
);
1941 /* Make sure the new disk is big enough
1942 * (we may currently be R_PRIMARY with no local disk...) */
1943 if (drbd_get_max_capacity(nbc
) <
1944 drbd_get_capacity(device
->this_bdev
)) {
1945 retcode
= ERR_DISK_TOO_SMALL
;
1949 nbc
->known_size
= drbd_get_capacity(nbc
->backing_bdev
);
1951 if (nbc
->known_size
> max_possible_sectors
) {
1952 drbd_warn(device
, "==> truncating very big lower level device "
1953 "to currently maximum possible %llu sectors <==\n",
1954 (unsigned long long) max_possible_sectors
);
1955 if (new_disk_conf
->meta_dev_idx
>= 0)
1956 drbd_warn(device
, "==>> using internal or flexible "
1957 "meta data may help <<==\n");
1960 drbd_suspend_io(device
);
1961 /* also wait for the last barrier ack. */
1962 /* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1963 * We need a way to either ignore barrier acks for barriers sent before a device
1964 * was attached, or a way to wait for all pending barrier acks to come in.
1965 * As barriers are counted per resource,
1966 * we'd need to suspend io on all devices of a resource.
1968 wait_event(device
->misc_wait
, !atomic_read(&device
->ap_pending_cnt
) || drbd_suspended(device
));
1969 /* and for any other previously queued work */
1970 drbd_flush_workqueue(&connection
->sender_work
);
1972 rv
= _drbd_request_state(device
, NS(disk
, D_ATTACHING
), CS_VERBOSE
);
1973 retcode
= rv
; /* FIXME: Type mismatch. */
1974 drbd_resume_io(device
);
1975 if (rv
< SS_SUCCESS
)
1978 if (!get_ldev_if_state(device
, D_ATTACHING
))
1979 goto force_diskless
;
1981 if (!device
->bitmap
) {
1982 if (drbd_bm_init(device
)) {
1983 retcode
= ERR_NOMEM
;
1984 goto force_diskless_dec
;
1988 if (device
->state
.pdsk
!= D_UP_TO_DATE
&& device
->ed_uuid
&&
1989 (device
->state
.role
== R_PRIMARY
|| device
->state
.peer
== R_PRIMARY
) &&
1990 (device
->ed_uuid
& ~((u64
)1)) != (nbc
->md
.uuid
[UI_CURRENT
] & ~((u64
)1))) {
1991 drbd_err(device
, "Can only attach to data with current UUID=%016llX\n",
1992 (unsigned long long)device
->ed_uuid
);
1993 retcode
= ERR_DATA_NOT_CURRENT
;
1994 goto force_diskless_dec
;
1997 /* Since we are diskless, fix the activity log first... */
1998 if (drbd_check_al_size(device
, new_disk_conf
)) {
1999 retcode
= ERR_NOMEM
;
2000 goto force_diskless_dec
;
2003 /* Prevent shrinking of consistent devices ! */
2005 unsigned long long nsz
= drbd_new_dev_size(device
, nbc
, nbc
->disk_conf
->disk_size
, 0);
2006 unsigned long long eff
= nbc
->md
.la_size_sect
;
2007 if (drbd_md_test_flag(nbc
, MDF_CONSISTENT
) && nsz
< eff
) {
2008 if (nsz
== nbc
->disk_conf
->disk_size
) {
2009 drbd_warn(device
, "truncating a consistent device during attach (%llu < %llu)\n", nsz
, eff
);
2011 drbd_warn(device
, "refusing to truncate a consistent device (%llu < %llu)\n", nsz
, eff
);
2012 drbd_msg_sprintf_info(adm_ctx
.reply_skb
,
2013 "To-be-attached device has last effective > current size, and is consistent\n"
2014 "(%llu > %llu sectors). Refusing to attach.", eff
, nsz
);
2015 retcode
= ERR_IMPLICIT_SHRINK
;
2016 goto force_diskless_dec
;
2021 lock_all_resources();
2022 retcode
= drbd_resync_after_valid(device
, new_disk_conf
->resync_after
);
2023 if (retcode
!= NO_ERROR
) {
2024 unlock_all_resources();
2025 goto force_diskless_dec
;
2028 /* Reset the "barriers don't work" bits here, then force meta data to
2029 * be written, to ensure we determine if barriers are supported. */
2030 if (new_disk_conf
->md_flushes
)
2031 clear_bit(MD_NO_FUA
, &device
->flags
);
2033 set_bit(MD_NO_FUA
, &device
->flags
);
2035 /* Point of no return reached.
2036 * Devices and memory are no longer released by error cleanup below.
2037 * now device takes over responsibility, and the state engine should
2038 * clean it up somewhere. */
2039 D_ASSERT(device
, device
->ldev
== NULL
);
2041 device
->resync
= resync_lru
;
2042 device
->rs_plan_s
= new_plan
;
2045 new_disk_conf
= NULL
;
2048 drbd_resync_after_changed(device
);
2049 drbd_bump_write_ordering(device
->resource
, device
->ldev
, WO_BDEV_FLUSH
);
2050 unlock_all_resources();
2052 if (drbd_md_test_flag(device
->ldev
, MDF_CRASHED_PRIMARY
))
2053 set_bit(CRASHED_PRIMARY
, &device
->flags
);
2055 clear_bit(CRASHED_PRIMARY
, &device
->flags
);
2057 if (drbd_md_test_flag(device
->ldev
, MDF_PRIMARY_IND
) &&
2058 !(device
->state
.role
== R_PRIMARY
&& device
->resource
->susp_nod
))
2059 set_bit(CRASHED_PRIMARY
, &device
->flags
);
2061 device
->send_cnt
= 0;
2062 device
->recv_cnt
= 0;
2063 device
->read_cnt
= 0;
2064 device
->writ_cnt
= 0;
2066 drbd_reconsider_queue_parameters(device
, device
->ldev
, NULL
);
2068 /* If I am currently not R_PRIMARY,
2069 * but meta data primary indicator is set,
2070 * I just now recover from a hard crash,
2071 * and have been R_PRIMARY before that crash.
2073 * Now, if I had no connection before that crash
2074 * (have been degraded R_PRIMARY), chances are that
2075 * I won't find my peer now either.
2077 * In that case, and _only_ in that case,
2078 * we use the degr-wfc-timeout instead of the default,
2079 * so we can automatically recover from a crash of a
2080 * degraded but active "cluster" after a certain timeout.
2082 clear_bit(USE_DEGR_WFC_T
, &device
->flags
);
2083 if (device
->state
.role
!= R_PRIMARY
&&
2084 drbd_md_test_flag(device
->ldev
, MDF_PRIMARY_IND
) &&
2085 !drbd_md_test_flag(device
->ldev
, MDF_CONNECTED_IND
))
2086 set_bit(USE_DEGR_WFC_T
, &device
->flags
);
2088 dd
= drbd_determine_dev_size(device
, 0, NULL
);
2089 if (dd
<= DS_ERROR
) {
2090 retcode
= ERR_NOMEM_BITMAP
;
2091 goto force_diskless_dec
;
2092 } else if (dd
== DS_GREW
)
2093 set_bit(RESYNC_AFTER_NEG
, &device
->flags
);
2095 if (drbd_md_test_flag(device
->ldev
, MDF_FULL_SYNC
) ||
2096 (test_bit(CRASHED_PRIMARY
, &device
->flags
) &&
2097 drbd_md_test_flag(device
->ldev
, MDF_AL_DISABLED
))) {
2098 drbd_info(device
, "Assuming that all blocks are out of sync "
2099 "(aka FullSync)\n");
2100 if (drbd_bitmap_io(device
, &drbd_bmio_set_n_write
,
2101 "set_n_write from attaching", BM_LOCKED_MASK
)) {
2102 retcode
= ERR_IO_MD_DISK
;
2103 goto force_diskless_dec
;
2106 if (drbd_bitmap_io(device
, &drbd_bm_read
,
2107 "read from attaching", BM_LOCKED_MASK
)) {
2108 retcode
= ERR_IO_MD_DISK
;
2109 goto force_diskless_dec
;
2113 if (_drbd_bm_total_weight(device
) == drbd_bm_bits(device
))
2114 drbd_suspend_al(device
); /* IO is still suspended here... */
2116 spin_lock_irq(&device
->resource
->req_lock
);
2117 os
= drbd_read_state(device
);
2119 /* If MDF_CONSISTENT is not set go into inconsistent state,
2120 otherwise investigate MDF_WasUpToDate...
2121 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
2122 otherwise into D_CONSISTENT state.
2124 if (drbd_md_test_flag(device
->ldev
, MDF_CONSISTENT
)) {
2125 if (drbd_md_test_flag(device
->ldev
, MDF_WAS_UP_TO_DATE
))
2126 ns
.disk
= D_CONSISTENT
;
2128 ns
.disk
= D_OUTDATED
;
2130 ns
.disk
= D_INCONSISTENT
;
2133 if (drbd_md_test_flag(device
->ldev
, MDF_PEER_OUT_DATED
))
2134 ns
.pdsk
= D_OUTDATED
;
2137 if (ns
.disk
== D_CONSISTENT
&&
2138 (ns
.pdsk
== D_OUTDATED
|| rcu_dereference(device
->ldev
->disk_conf
)->fencing
== FP_DONT_CARE
))
2139 ns
.disk
= D_UP_TO_DATE
;
2141 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2142 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2143 this point, because drbd_request_state() modifies these
2146 if (rcu_dereference(device
->ldev
->disk_conf
)->al_updates
)
2147 device
->ldev
->md
.flags
&= ~MDF_AL_DISABLED
;
2149 device
->ldev
->md
.flags
|= MDF_AL_DISABLED
;
2153 /* In case we are C_CONNECTED postpone any decision on the new disk
2154 state after the negotiation phase. */
2155 if (device
->state
.conn
== C_CONNECTED
) {
2156 device
->new_state_tmp
.i
= ns
.i
;
2158 ns
.disk
= D_NEGOTIATING
;
2160 /* We expect to receive up-to-date UUIDs soon.
2161 To avoid a race in receive_state, free p_uuid while
2162 holding req_lock. I.e. atomic with the state change */
2163 kfree(device
->p_uuid
);
2164 device
->p_uuid
= NULL
;
2167 rv
= _drbd_set_state(device
, ns
, CS_VERBOSE
, NULL
);
2168 spin_unlock_irq(&device
->resource
->req_lock
);
2170 if (rv
< SS_SUCCESS
)
2171 goto force_diskless_dec
;
2173 mod_timer(&device
->request_timer
, jiffies
+ HZ
);
2175 if (device
->state
.role
== R_PRIMARY
)
2176 device
->ldev
->md
.uuid
[UI_CURRENT
] |= (u64
)1;
2178 device
->ldev
->md
.uuid
[UI_CURRENT
] &= ~(u64
)1;
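	/* By convention the least significant bit of the current UUID records
	 * whether the data is being modified by a Primary: set it while we are
	 * Primary, clear it otherwise. */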
2180 drbd_md_mark_dirty(device
);
2181 drbd_md_sync(device
);
2183 kobject_uevent(&disk_to_dev(device
->vdisk
)->kobj
, KOBJ_CHANGE
);
2185 conn_reconfig_done(connection
);
2186 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2187 drbd_adm_finish(&adm_ctx
, info
, retcode
);
2193 drbd_force_state(device
, NS(disk
, D_DISKLESS
));
2194 drbd_md_sync(device
);
2196 conn_reconfig_done(connection
);
2198 close_backing_dev(device
, nbc
->md_bdev
, nbc
->md_bdev
!= nbc
->backing_bdev
);
2199 close_backing_dev(device
, nbc
->backing_bdev
, true);
2202 kfree(new_disk_conf
);
2203 lc_destroy(resync_lru
);
2205 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2207 drbd_adm_finish(&adm_ctx
, info
, retcode
);
static int adm_detach(struct drbd_device *device, int force)
{
	if (force) {
		set_bit(FORCE_DETACH, &device->flags);
		drbd_force_state(device, NS(disk, D_FAILED));
		return SS_SUCCESS;
	}

	return drbd_request_detach_interruptible(device);
}
2222 /* Detaching the disk is a process in multiple stages. First we need to lock
2223 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
2224 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
2225 * internal references as well.
2226 * Only then we have finally detached. */
2227 int drbd_adm_detach(struct sk_buff
*skb
, struct genl_info
*info
)
2229 struct drbd_config_context adm_ctx
;
2230 enum drbd_ret_code retcode
;
2231 struct detach_parms parms
= { };
2234 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
2235 if (!adm_ctx
.reply_skb
)
2237 if (retcode
!= NO_ERROR
)
2240 if (info
->attrs
[DRBD_NLA_DETACH_PARMS
]) {
2241 err
= detach_parms_from_attrs(&parms
, info
);
2243 retcode
= ERR_MANDATORY_TAG
;
2244 drbd_msg_put_info(adm_ctx
.reply_skb
, from_attrs_err_to_txt(err
));
2249 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
2250 retcode
= adm_detach(adm_ctx
.device
, parms
.force_detach
);
2251 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2253 drbd_adm_finish(&adm_ctx
, info
, retcode
);
static bool conn_resync_running(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.conn == C_SYNC_SOURCE ||
		    device->state.conn == C_SYNC_TARGET ||
		    device->state.conn == C_PAUSED_SYNC_S ||
		    device->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
static bool conn_ov_running(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.conn == C_VERIFY_S ||
		    device->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
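/*
 * conn_resync_running() and conn_ov_running() are consulted by
 * drbd_adm_net_opts() below: the csums-alg must not be changed while a
 * resync is running, and the verify-alg must not be changed while an
 * online verify is running.
 */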
2299 static enum drbd_ret_code
2300 _check_net_options(struct drbd_connection
*connection
, struct net_conf
*old_net_conf
, struct net_conf
*new_net_conf
)
2302 struct drbd_peer_device
*peer_device
;
2305 if (old_net_conf
&& connection
->cstate
== C_WF_REPORT_PARAMS
&& connection
->agreed_pro_version
< 100) {
2306 if (new_net_conf
->wire_protocol
!= old_net_conf
->wire_protocol
)
2307 return ERR_NEED_APV_100
;
2309 if (new_net_conf
->two_primaries
!= old_net_conf
->two_primaries
)
2310 return ERR_NEED_APV_100
;
2312 if (strcmp(new_net_conf
->integrity_alg
, old_net_conf
->integrity_alg
))
2313 return ERR_NEED_APV_100
;
2316 if (!new_net_conf
->two_primaries
&&
2317 conn_highest_role(connection
) == R_PRIMARY
&&
2318 conn_highest_peer(connection
) == R_PRIMARY
)
2319 return ERR_NEED_ALLOW_TWO_PRI
;
2321 if (new_net_conf
->two_primaries
&&
2322 (new_net_conf
->wire_protocol
!= DRBD_PROT_C
))
2323 return ERR_NOT_PROTO_C
;
2325 idr_for_each_entry(&connection
->peer_devices
, peer_device
, i
) {
2326 struct drbd_device
*device
= peer_device
->device
;
2327 if (get_ldev(device
)) {
2328 enum drbd_fencing_p fp
= rcu_dereference(device
->ldev
->disk_conf
)->fencing
;
2330 if (new_net_conf
->wire_protocol
== DRBD_PROT_A
&& fp
== FP_STONITH
)
2331 return ERR_STONITH_AND_PROT_A
;
2333 if (device
->state
.role
== R_PRIMARY
&& new_net_conf
->discard_my_data
)
2334 return ERR_DISCARD_IMPOSSIBLE
;
2337 if (new_net_conf
->on_congestion
!= OC_BLOCK
&& new_net_conf
->wire_protocol
!= DRBD_PROT_A
)
2338 return ERR_CONG_NOT_PROTO_A
;
2343 static enum drbd_ret_code
2344 check_net_options(struct drbd_connection
*connection
, struct net_conf
*new_net_conf
)
2346 enum drbd_ret_code rv
;
2347 struct drbd_peer_device
*peer_device
;
2351 rv
= _check_net_options(connection
, rcu_dereference(connection
->net_conf
), new_net_conf
);
2354 /* connection->peer_devices protected by genl_lock() here */
2355 idr_for_each_entry(&connection
->peer_devices
, peer_device
, i
) {
2356 struct drbd_device
*device
= peer_device
->device
;
2357 if (!device
->bitmap
) {
2358 if (drbd_bm_init(device
))
struct crypto {
	struct crypto_shash *verify_tfm;
	struct crypto_shash *csums_tfm;
	struct crypto_shash *cram_hmac_tfm;
	struct crypto_shash *integrity_tfm;
};

static int
alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_shash(tfm_name, 0, 0);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}
2388 static enum drbd_ret_code
2389 alloc_crypto(struct crypto
*crypto
, struct net_conf
*new_net_conf
)
2391 char hmac_name
[CRYPTO_MAX_ALG_NAME
];
2392 enum drbd_ret_code rv
;
2394 rv
= alloc_shash(&crypto
->csums_tfm
, new_net_conf
->csums_alg
,
2398 rv
= alloc_shash(&crypto
->verify_tfm
, new_net_conf
->verify_alg
,
2402 rv
= alloc_shash(&crypto
->integrity_tfm
, new_net_conf
->integrity_alg
,
2406 if (new_net_conf
->cram_hmac_alg
[0] != 0) {
2407 snprintf(hmac_name
, CRYPTO_MAX_ALG_NAME
, "hmac(%s)",
2408 new_net_conf
->cram_hmac_alg
);
2410 rv
= alloc_shash(&crypto
->cram_hmac_tfm
, hmac_name
,
static void free_crypto(struct crypto *crypto)
{
	crypto_free_shash(crypto->cram_hmac_tfm);
	crypto_free_shash(crypto->integrity_tfm);
	crypto_free_shash(crypto->csums_tfm);
	crypto_free_shash(crypto->verify_tfm);
}
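/*
 * crypto_free_shash() tolerates NULL pointers, so free_crypto() is safe to
 * call on a struct crypto that was only partially populated, which the
 * error paths of the functions below rely on.
 */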
2425 int drbd_adm_net_opts(struct sk_buff
*skb
, struct genl_info
*info
)
2427 struct drbd_config_context adm_ctx
;
2428 enum drbd_ret_code retcode
;
2429 struct drbd_connection
*connection
;
2430 struct net_conf
*old_net_conf
, *new_net_conf
= NULL
;
2432 int ovr
; /* online verify running */
2433 int rsr
; /* re-sync running */
2434 struct crypto crypto
= { };
2436 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_CONNECTION
);
2437 if (!adm_ctx
.reply_skb
)
2439 if (retcode
!= NO_ERROR
)
2442 connection
= adm_ctx
.connection
;
2443 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
2445 new_net_conf
= kzalloc(sizeof(struct net_conf
), GFP_KERNEL
);
2446 if (!new_net_conf
) {
2447 retcode
= ERR_NOMEM
;
2451 conn_reconfig_start(connection
);
2453 mutex_lock(&connection
->data
.mutex
);
2454 mutex_lock(&connection
->resource
->conf_update
);
2455 old_net_conf
= connection
->net_conf
;
2457 if (!old_net_conf
) {
2458 drbd_msg_put_info(adm_ctx
.reply_skb
, "net conf missing, try connect");
2459 retcode
= ERR_INVALID_REQUEST
;
2463 *new_net_conf
= *old_net_conf
;
2464 if (should_set_defaults(info
))
2465 set_net_conf_defaults(new_net_conf
);
2467 err
= net_conf_from_attrs_for_change(new_net_conf
, info
);
2468 if (err
&& err
!= -ENOMSG
) {
2469 retcode
= ERR_MANDATORY_TAG
;
2470 drbd_msg_put_info(adm_ctx
.reply_skb
, from_attrs_err_to_txt(err
));
2474 retcode
= check_net_options(connection
, new_net_conf
);
2475 if (retcode
!= NO_ERROR
)
2478 /* re-sync running */
2479 rsr
= conn_resync_running(connection
);
2480 if (rsr
&& strcmp(new_net_conf
->csums_alg
, old_net_conf
->csums_alg
)) {
2481 retcode
= ERR_CSUMS_RESYNC_RUNNING
;
2485 /* online verify running */
2486 ovr
= conn_ov_running(connection
);
2487 if (ovr
&& strcmp(new_net_conf
->verify_alg
, old_net_conf
->verify_alg
)) {
2488 retcode
= ERR_VERIFY_RUNNING
;
2492 retcode
= alloc_crypto(&crypto
, new_net_conf
);
2493 if (retcode
!= NO_ERROR
)
2496 rcu_assign_pointer(connection
->net_conf
, new_net_conf
);
2499 crypto_free_shash(connection
->csums_tfm
);
2500 connection
->csums_tfm
= crypto
.csums_tfm
;
2501 crypto
.csums_tfm
= NULL
;
2504 crypto_free_shash(connection
->verify_tfm
);
2505 connection
->verify_tfm
= crypto
.verify_tfm
;
2506 crypto
.verify_tfm
= NULL
;
2509 crypto_free_shash(connection
->integrity_tfm
);
2510 connection
->integrity_tfm
= crypto
.integrity_tfm
;
2511 if (connection
->cstate
>= C_WF_REPORT_PARAMS
&& connection
->agreed_pro_version
>= 100)
2512 /* Do this without trying to take connection->data.mutex again. */
2513 __drbd_send_protocol(connection
, P_PROTOCOL_UPDATE
);
2515 crypto_free_shash(connection
->cram_hmac_tfm
);
2516 connection
->cram_hmac_tfm
= crypto
.cram_hmac_tfm
;
2518 mutex_unlock(&connection
->resource
->conf_update
);
2519 mutex_unlock(&connection
->data
.mutex
);
2521 kfree(old_net_conf
);
2523 if (connection
->cstate
>= C_WF_REPORT_PARAMS
) {
2524 struct drbd_peer_device
*peer_device
;
2527 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
)
2528 drbd_send_sync_param(peer_device
);
2534 mutex_unlock(&connection
->resource
->conf_update
);
2535 mutex_unlock(&connection
->data
.mutex
);
2536 free_crypto(&crypto
);
2537 kfree(new_net_conf
);
2539 conn_reconfig_done(connection
);
2541 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2543 drbd_adm_finish(&adm_ctx
, info
, retcode
);
static void connection_to_info(struct connection_info *info,
			       struct drbd_connection *connection)
{
	info->conn_connection_state = connection->cstate;
	info->conn_role = conn_highest_peer(connection);
}

static void peer_device_to_info(struct peer_device_info *info,
				struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;

	info->peer_repl_state =
		max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
	info->peer_disk_state = device->state.pdsk;
	info->peer_resync_susp_user = device->state.user_isp;
	info->peer_resync_susp_peer = device->state.peer_isp;
	info->peer_resync_susp_dependency = device->state.aftr_isp;
}
2567 int drbd_adm_connect(struct sk_buff
*skb
, struct genl_info
*info
)
2569 struct connection_info connection_info
;
2570 enum drbd_notification_type flags
;
2571 unsigned int peer_devices
= 0;
2572 struct drbd_config_context adm_ctx
;
2573 struct drbd_peer_device
*peer_device
;
2574 struct net_conf
*old_net_conf
, *new_net_conf
= NULL
;
2575 struct crypto crypto
= { };
2576 struct drbd_resource
*resource
;
2577 struct drbd_connection
*connection
;
2578 enum drbd_ret_code retcode
;
2582 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_RESOURCE
);
2584 if (!adm_ctx
.reply_skb
)
2586 if (retcode
!= NO_ERROR
)
2588 if (!(adm_ctx
.my_addr
&& adm_ctx
.peer_addr
)) {
2589 drbd_msg_put_info(adm_ctx
.reply_skb
, "connection endpoint(s) missing");
2590 retcode
= ERR_INVALID_REQUEST
;
2594 /* No need for _rcu here. All reconfiguration is
2595 * strictly serialized on genl_lock(). We are protected against
2596 * concurrent reconfiguration/addition/deletion */
2597 for_each_resource(resource
, &drbd_resources
) {
2598 for_each_connection(connection
, resource
) {
2599 if (nla_len(adm_ctx
.my_addr
) == connection
->my_addr_len
&&
2600 !memcmp(nla_data(adm_ctx
.my_addr
), &connection
->my_addr
,
2601 connection
->my_addr_len
)) {
2602 retcode
= ERR_LOCAL_ADDR
;
2606 if (nla_len(adm_ctx
.peer_addr
) == connection
->peer_addr_len
&&
2607 !memcmp(nla_data(adm_ctx
.peer_addr
), &connection
->peer_addr
,
2608 connection
->peer_addr_len
)) {
2609 retcode
= ERR_PEER_ADDR
;
2615 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
2616 connection
= first_connection(adm_ctx
.resource
);
2617 conn_reconfig_start(connection
);
2619 if (connection
->cstate
> C_STANDALONE
) {
2620 retcode
= ERR_NET_CONFIGURED
;
2624 /* allocation not in the IO path, drbdsetup / netlink process context */
2625 new_net_conf
= kzalloc(sizeof(*new_net_conf
), GFP_KERNEL
);
2626 if (!new_net_conf
) {
2627 retcode
= ERR_NOMEM
;
2631 set_net_conf_defaults(new_net_conf
);
2633 err
= net_conf_from_attrs(new_net_conf
, info
);
2634 if (err
&& err
!= -ENOMSG
) {
2635 retcode
= ERR_MANDATORY_TAG
;
2636 drbd_msg_put_info(adm_ctx
.reply_skb
, from_attrs_err_to_txt(err
));
2640 retcode
= check_net_options(connection
, new_net_conf
);
2641 if (retcode
!= NO_ERROR
)
2644 retcode
= alloc_crypto(&crypto
, new_net_conf
);
2645 if (retcode
!= NO_ERROR
)
2648 ((char *)new_net_conf
->shared_secret
)[SHARED_SECRET_MAX
-1] = 0;
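	/* Defensive: the secret arrived from userspace via netlink; force
	 * NUL-termination before it is ever used as a C string. */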
2650 drbd_flush_workqueue(&connection
->sender_work
);
2652 mutex_lock(&adm_ctx
.resource
->conf_update
);
2653 old_net_conf
= connection
->net_conf
;
2655 retcode
= ERR_NET_CONFIGURED
;
2656 mutex_unlock(&adm_ctx
.resource
->conf_update
);
2659 rcu_assign_pointer(connection
->net_conf
, new_net_conf
);
2661 conn_free_crypto(connection
);
2662 connection
->cram_hmac_tfm
= crypto
.cram_hmac_tfm
;
2663 connection
->integrity_tfm
= crypto
.integrity_tfm
;
2664 connection
->csums_tfm
= crypto
.csums_tfm
;
2665 connection
->verify_tfm
= crypto
.verify_tfm
;
2667 connection
->my_addr_len
= nla_len(adm_ctx
.my_addr
);
2668 memcpy(&connection
->my_addr
, nla_data(adm_ctx
.my_addr
), connection
->my_addr_len
);
2669 connection
->peer_addr_len
= nla_len(adm_ctx
.peer_addr
);
2670 memcpy(&connection
->peer_addr
, nla_data(adm_ctx
.peer_addr
), connection
->peer_addr_len
);
2672 idr_for_each_entry(&connection
->peer_devices
, peer_device
, i
) {
2676 connection_to_info(&connection_info
, connection
);
2677 flags
= (peer_devices
--) ? NOTIFY_CONTINUES
: 0;
2678 mutex_lock(¬ification_mutex
);
2679 notify_connection_state(NULL
, 0, connection
, &connection_info
, NOTIFY_CREATE
| flags
);
2680 idr_for_each_entry(&connection
->peer_devices
, peer_device
, i
) {
2681 struct peer_device_info peer_device_info
;
2683 peer_device_to_info(&peer_device_info
, peer_device
);
2684 flags
= (peer_devices
--) ? NOTIFY_CONTINUES
: 0;
2685 notify_peer_device_state(NULL
, 0, peer_device
, &peer_device_info
, NOTIFY_CREATE
| flags
);
2687 mutex_unlock(¬ification_mutex
);
2688 mutex_unlock(&adm_ctx
.resource
->conf_update
);
2691 idr_for_each_entry(&connection
->peer_devices
, peer_device
, i
) {
2692 struct drbd_device
*device
= peer_device
->device
;
2693 device
->send_cnt
= 0;
2694 device
->recv_cnt
= 0;
2698 retcode
= conn_request_state(connection
, NS(conn
, C_UNCONNECTED
), CS_VERBOSE
);
2700 conn_reconfig_done(connection
);
2701 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2702 drbd_adm_finish(&adm_ctx
, info
, retcode
);
2706 free_crypto(&crypto
);
2707 kfree(new_net_conf
);
2709 conn_reconfig_done(connection
);
2710 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2712 drbd_adm_finish(&adm_ctx
, info
, retcode
);
2716 static enum drbd_state_rv
conn_try_disconnect(struct drbd_connection
*connection
, bool force
)
2718 enum drbd_conns cstate
;
2719 enum drbd_state_rv rv
;
2722 rv
= conn_request_state(connection
, NS(conn
, C_DISCONNECTING
),
2723 force
? CS_HARD
: 0);
2726 case SS_NOTHING_TO_DO
:
2728 case SS_ALREADY_STANDALONE
:
2730 case SS_PRIMARY_NOP
:
2731 /* Our state checking code wants to see the peer outdated. */
2732 rv
= conn_request_state(connection
, NS2(conn
, C_DISCONNECTING
, pdsk
, D_OUTDATED
), 0);
2734 if (rv
== SS_OUTDATE_WO_CONN
) /* lost connection before graceful disconnect succeeded */
2735 rv
= conn_request_state(connection
, NS(conn
, C_DISCONNECTING
), CS_VERBOSE
);
2738 case SS_CW_FAILED_BY_PEER
:
2739 spin_lock_irq(&connection
->resource
->req_lock
);
2740 cstate
= connection
->cstate
;
2741 spin_unlock_irq(&connection
->resource
->req_lock
);
2742 if (cstate
<= C_WF_CONNECTION
)
2744 /* The peer probably wants to see us outdated. */
2745 rv
= conn_request_state(connection
, NS2(conn
, C_DISCONNECTING
,
2746 disk
, D_OUTDATED
), 0);
2747 if (rv
== SS_IS_DISKLESS
|| rv
== SS_LOWER_THAN_OUTDATED
) {
2748 rv
= conn_request_state(connection
, NS(conn
, C_DISCONNECTING
),
2753 /* no special handling necessary */
2756 if (rv
>= SS_SUCCESS
) {
2757 enum drbd_state_rv rv2
;
2758 /* No one else can reconfigure the network while I am here.
2759 * The state handling only uses drbd_thread_stop_nowait(),
2760 * we want to really wait here until the receiver is no more.
2762 drbd_thread_stop(&connection
->receiver
);
2764 /* Race breaker. This additional state change request may be
2765 * necessary, if this was a forced disconnect during a receiver
2766 * restart. We may have "killed" the receiver thread just
2767 * after drbd_receiver() returned. Typically, we should be
2768 * C_STANDALONE already, now, and this becomes a no-op.
2770 rv2
= conn_request_state(connection
, NS(conn
, C_STANDALONE
),
2771 CS_VERBOSE
| CS_HARD
);
2772 if (rv2
< SS_SUCCESS
)
2773 drbd_err(connection
,
2774 "unexpected rv2=%d in conn_try_disconnect()\n",
2776 /* Unlike in DRBD 9, the state engine has generated
2777 * NOTIFY_DESTROY events before clearing connection->net_conf. */
2782 int drbd_adm_disconnect(struct sk_buff
*skb
, struct genl_info
*info
)
2784 struct drbd_config_context adm_ctx
;
2785 struct disconnect_parms parms
;
2786 struct drbd_connection
*connection
;
2787 enum drbd_state_rv rv
;
2788 enum drbd_ret_code retcode
;
2791 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_CONNECTION
);
2792 if (!adm_ctx
.reply_skb
)
2794 if (retcode
!= NO_ERROR
)
2797 connection
= adm_ctx
.connection
;
2798 memset(&parms
, 0, sizeof(parms
));
2799 if (info
->attrs
[DRBD_NLA_DISCONNECT_PARMS
]) {
2800 err
= disconnect_parms_from_attrs(&parms
, info
);
2802 retcode
= ERR_MANDATORY_TAG
;
2803 drbd_msg_put_info(adm_ctx
.reply_skb
, from_attrs_err_to_txt(err
));
2808 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
2809 rv
= conn_try_disconnect(connection
, parms
.force_disconnect
);
2810 if (rv
< SS_SUCCESS
)
2811 retcode
= rv
; /* FIXME: Type mismatch. */
2814 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2816 drbd_adm_finish(&adm_ctx
, info
, retcode
);
void resync_after_online_grow(struct drbd_device *device)
{
	int iass; /* I am sync source */

	drbd_info(device, "Resync of new storage after online grow\n");
	if (device->state.role != device->state.peer)
		iass = (device->state.role == R_PRIMARY);
	else
		iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);

	if (iass)
		drbd_start_resync(device, C_SYNC_SOURCE);
	else
		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
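/*
 * Tie break: when both nodes have the same role, the RESOLVE_CONFLICTS flag
 * (agreed on during the connection handshake) decides which side acts as
 * sync source, so exactly one of the two peers starts the resync after an
 * online grow.
 */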
2836 int drbd_adm_resize(struct sk_buff
*skb
, struct genl_info
*info
)
2838 struct drbd_config_context adm_ctx
;
2839 struct disk_conf
*old_disk_conf
, *new_disk_conf
= NULL
;
2840 struct resize_parms rs
;
2841 struct drbd_device
*device
;
2842 enum drbd_ret_code retcode
;
2843 enum determine_dev_size dd
;
2844 bool change_al_layout
= false;
2845 enum dds_flags ddsf
;
2849 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
2850 if (!adm_ctx
.reply_skb
)
2852 if (retcode
!= NO_ERROR
)
2855 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
2856 device
= adm_ctx
.device
;
2857 if (!get_ldev(device
)) {
2858 retcode
= ERR_NO_DISK
;
2862 memset(&rs
, 0, sizeof(struct resize_parms
));
2863 rs
.al_stripes
= device
->ldev
->md
.al_stripes
;
2864 rs
.al_stripe_size
= device
->ldev
->md
.al_stripe_size_4k
* 4;
2865 if (info
->attrs
[DRBD_NLA_RESIZE_PARMS
]) {
2866 err
= resize_parms_from_attrs(&rs
, info
);
2868 retcode
= ERR_MANDATORY_TAG
;
2869 drbd_msg_put_info(adm_ctx
.reply_skb
, from_attrs_err_to_txt(err
));
2874 if (device
->state
.conn
> C_CONNECTED
) {
2875 retcode
= ERR_RESIZE_RESYNC
;
2879 if (device
->state
.role
== R_SECONDARY
&&
2880 device
->state
.peer
== R_SECONDARY
) {
2881 retcode
= ERR_NO_PRIMARY
;
2885 if (rs
.no_resync
&& first_peer_device(device
)->connection
->agreed_pro_version
< 93) {
2886 retcode
= ERR_NEED_APV_93
;
2891 u_size
= rcu_dereference(device
->ldev
->disk_conf
)->disk_size
;
2893 if (u_size
!= (sector_t
)rs
.resize_size
) {
2894 new_disk_conf
= kmalloc(sizeof(struct disk_conf
), GFP_KERNEL
);
2895 if (!new_disk_conf
) {
2896 retcode
= ERR_NOMEM
;
2901 if (device
->ldev
->md
.al_stripes
!= rs
.al_stripes
||
2902 device
->ldev
->md
.al_stripe_size_4k
!= rs
.al_stripe_size
/ 4) {
2903 u32 al_size_k
= rs
.al_stripes
* rs
.al_stripe_size
;
2905 if (al_size_k
> (16 * 1024 * 1024)) {
2906 retcode
= ERR_MD_LAYOUT_TOO_BIG
;
2910 if (al_size_k
< MD_32kB_SECT
/2) {
2911 retcode
= ERR_MD_LAYOUT_TOO_SMALL
;
2915 if (device
->state
.conn
!= C_CONNECTED
&& !rs
.resize_force
) {
2916 retcode
= ERR_MD_LAYOUT_CONNECTED
;
2920 change_al_layout
= true;
2923 if (device
->ldev
->known_size
!= drbd_get_capacity(device
->ldev
->backing_bdev
))
2924 device
->ldev
->known_size
= drbd_get_capacity(device
->ldev
->backing_bdev
);
2926 if (new_disk_conf
) {
2927 mutex_lock(&device
->resource
->conf_update
);
2928 old_disk_conf
= device
->ldev
->disk_conf
;
2929 *new_disk_conf
= *old_disk_conf
;
2930 new_disk_conf
->disk_size
= (sector_t
)rs
.resize_size
;
2931 rcu_assign_pointer(device
->ldev
->disk_conf
, new_disk_conf
);
2932 mutex_unlock(&device
->resource
->conf_update
);
2934 kfree(old_disk_conf
);
2935 new_disk_conf
= NULL
;
2938 ddsf
= (rs
.resize_force
? DDSF_FORCED
: 0) | (rs
.no_resync
? DDSF_NO_RESYNC
: 0);
2939 dd
= drbd_determine_dev_size(device
, ddsf
, change_al_layout
? &rs
: NULL
);
2940 drbd_md_sync(device
);
2942 if (dd
== DS_ERROR
) {
2943 retcode
= ERR_NOMEM_BITMAP
;
2945 } else if (dd
== DS_ERROR_SPACE_MD
) {
2946 retcode
= ERR_MD_LAYOUT_NO_FIT
;
2948 } else if (dd
== DS_ERROR_SHRINK
) {
2949 retcode
= ERR_IMPLICIT_SHRINK
;
2953 if (device
->state
.conn
== C_CONNECTED
) {
2955 set_bit(RESIZE_PENDING
, &device
->flags
);
2957 drbd_send_uuids(first_peer_device(device
));
2958 drbd_send_sizes(first_peer_device(device
), 1, ddsf
);
2962 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2964 drbd_adm_finish(&adm_ctx
, info
, retcode
);
2969 kfree(new_disk_conf
);
2973 int drbd_adm_resource_opts(struct sk_buff
*skb
, struct genl_info
*info
)
2975 struct drbd_config_context adm_ctx
;
2976 enum drbd_ret_code retcode
;
2977 struct res_opts res_opts
;
2980 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_RESOURCE
);
2981 if (!adm_ctx
.reply_skb
)
2983 if (retcode
!= NO_ERROR
)
2986 res_opts
= adm_ctx
.resource
->res_opts
;
2987 if (should_set_defaults(info
))
2988 set_res_opts_defaults(&res_opts
);
2990 err
= res_opts_from_attrs(&res_opts
, info
);
2991 if (err
&& err
!= -ENOMSG
) {
2992 retcode
= ERR_MANDATORY_TAG
;
2993 drbd_msg_put_info(adm_ctx
.reply_skb
, from_attrs_err_to_txt(err
));
2997 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
2998 err
= set_resource_options(adm_ctx
.resource
, &res_opts
);
3000 retcode
= ERR_INVALID_REQUEST
;
3002 retcode
= ERR_NOMEM
;
3004 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
3007 drbd_adm_finish(&adm_ctx
, info
, retcode
);
3011 int drbd_adm_invalidate(struct sk_buff
*skb
, struct genl_info
*info
)
3013 struct drbd_config_context adm_ctx
;
3014 struct drbd_device
*device
;
3015 int retcode
; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3017 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
3018 if (!adm_ctx
.reply_skb
)
3020 if (retcode
!= NO_ERROR
)
3023 device
= adm_ctx
.device
;
3024 if (!get_ldev(device
)) {
3025 retcode
= ERR_NO_DISK
;
3029 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
3034 drbd_suspend_io(device
);
3035 wait_event(device
->misc_wait
, !test_bit(BITMAP_IO
, &device
->flags
));
3036 drbd_flush_workqueue(&first_peer_device(device
)->connection
->sender_work
);
3038 /* If we happen to be C_STANDALONE R_SECONDARY, just change to
3039 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
3040 * try to start a resync handshake as sync target for full sync.
3042 if (device
->state
.conn
== C_STANDALONE
&& device
->state
.role
== R_SECONDARY
) {
3043 retcode
= drbd_request_state(device
, NS(disk
, D_INCONSISTENT
));
3044 if (retcode
>= SS_SUCCESS
) {
3045 if (drbd_bitmap_io(device
, &drbd_bmio_set_n_write
,
3046 "set_n_write from invalidate", BM_LOCKED_MASK
))
3047 retcode
= ERR_IO_MD_DISK
;
3050 retcode
= drbd_request_state(device
, NS(conn
, C_STARTING_SYNC_T
));
3051 drbd_resume_io(device
);
3052 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
3055 drbd_adm_finish(&adm_ctx
, info
, retcode
);
3059 static int drbd_adm_simple_request_state(struct sk_buff
*skb
, struct genl_info
*info
,
3060 union drbd_state mask
, union drbd_state val
)
3062 struct drbd_config_context adm_ctx
;
3063 enum drbd_ret_code retcode
;
3065 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
3066 if (!adm_ctx
.reply_skb
)
3068 if (retcode
!= NO_ERROR
)
3071 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
3072 retcode
= drbd_request_state(adm_ctx
.device
, mask
, val
);
3073 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
3075 drbd_adm_finish(&adm_ctx
, info
, retcode
);
static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
{
	int rv;

	rv = drbd_bmio_set_n_write(device);
	drbd_suspend_al(device);
	return rv;
}
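/*
 * Bitmap-IO helper for invalidate-peer below: mark all blocks out of sync
 * and additionally suspend activity-log updates while the full sync that
 * follows is still pending.
 */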
3088 int drbd_adm_invalidate_peer(struct sk_buff
*skb
, struct genl_info
*info
)
3090 struct drbd_config_context adm_ctx
;
3091 int retcode
; /* drbd_ret_code, drbd_state_rv */
3092 struct drbd_device
*device
;
3094 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
3095 if (!adm_ctx
.reply_skb
)
3097 if (retcode
!= NO_ERROR
)
3100 device
= adm_ctx
.device
;
3101 if (!get_ldev(device
)) {
3102 retcode
= ERR_NO_DISK
;
3106 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
3111 drbd_suspend_io(device
);
3112 wait_event(device
->misc_wait
, !test_bit(BITMAP_IO
, &device
->flags
));
3113 drbd_flush_workqueue(&first_peer_device(device
)->connection
->sender_work
);
3115 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
3116 * in the bitmap. Otherwise, try to start a resync handshake
3117 * as sync source for full sync.
3119 if (device
->state
.conn
== C_STANDALONE
&& device
->state
.role
== R_PRIMARY
) {
3120 /* The peer will get a resync upon connect anyways. Just make that
3121 into a full resync. */
3122 retcode
= drbd_request_state(device
, NS(pdsk
, D_INCONSISTENT
));
3123 if (retcode
>= SS_SUCCESS
) {
3124 if (drbd_bitmap_io(device
, &drbd_bmio_set_susp_al
,
3125 "set_n_write from invalidate_peer",
3126 BM_LOCKED_SET_ALLOWED
))
3127 retcode
= ERR_IO_MD_DISK
;
3130 retcode
= drbd_request_state(device
, NS(conn
, C_STARTING_SYNC_S
));
3131 drbd_resume_io(device
);
3132 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
3135 drbd_adm_finish(&adm_ctx
, info
, retcode
);
3139 int drbd_adm_pause_sync(struct sk_buff
*skb
, struct genl_info
*info
)
3141 struct drbd_config_context adm_ctx
;
3142 enum drbd_ret_code retcode
;
3144 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
3145 if (!adm_ctx
.reply_skb
)
3147 if (retcode
!= NO_ERROR
)
3150 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
3151 if (drbd_request_state(adm_ctx
.device
, NS(user_isp
, 1)) == SS_NOTHING_TO_DO
)
3152 retcode
= ERR_PAUSE_IS_SET
;
3153 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
3155 drbd_adm_finish(&adm_ctx
, info
, retcode
);
3159 int drbd_adm_resume_sync(struct sk_buff
*skb
, struct genl_info
*info
)
3161 struct drbd_config_context adm_ctx
;
3162 union drbd_dev_state s
;
3163 enum drbd_ret_code retcode
;
3165 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
3166 if (!adm_ctx
.reply_skb
)
3168 if (retcode
!= NO_ERROR
)
3171 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
3172 if (drbd_request_state(adm_ctx
.device
, NS(user_isp
, 0)) == SS_NOTHING_TO_DO
) {
3173 s
= adm_ctx
.device
->state
;
3174 if (s
.conn
== C_PAUSED_SYNC_S
|| s
.conn
== C_PAUSED_SYNC_T
) {
3175 retcode
= s
.aftr_isp
? ERR_PIC_AFTER_DEP
:
3176 s
.peer_isp
? ERR_PIC_PEER_DEP
: ERR_PAUSE_IS_CLEAR
;
3178 retcode
= ERR_PAUSE_IS_CLEAR
;
3181 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
3183 drbd_adm_finish(&adm_ctx
, info
, retcode
);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
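/*
 * Suspending IO is a plain state request (susp = 1); resuming, below, is
 * more involved because a node that was fenced off may first have to
 * generate a new current UUID before it fails the frozen requests.
 */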
3192 int drbd_adm_resume_io(struct sk_buff
*skb
, struct genl_info
*info
)
3194 struct drbd_config_context adm_ctx
;
3195 struct drbd_device
*device
;
3196 int retcode
; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3198 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
3199 if (!adm_ctx
.reply_skb
)
3201 if (retcode
!= NO_ERROR
)
3204 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
3205 device
= adm_ctx
.device
;
3206 if (test_bit(NEW_CUR_UUID
, &device
->flags
)) {
3207 if (get_ldev_if_state(device
, D_ATTACHING
)) {
3208 drbd_uuid_new_current(device
);
3211 /* This is effectively a multi-stage "forced down".
3212 * The NEW_CUR_UUID bit is supposedly only set, if we
3213 * lost the replication connection, and are configured
3214 * to freeze IO and wait for some fence-peer handler.
3215 * So we still don't have a replication connection.
3216 * And now we don't have a local disk either. After
3217 * resume, we will fail all pending and new IO, because
3218 * we don't have any data anymore. Which means we will
3219 * eventually be able to terminate all users of this
3220 * device, and then take it down. By bumping the
3221 * "effective" data uuid, we make sure that you really
			 * need to tear down before you reconfigure; we will
			 * then refuse to re-connect or re-attach (because no
3224 * matching real data uuid exists).
3227 get_random_bytes(&val
, sizeof(u64
));
3228 drbd_set_ed_uuid(device
, val
);
3229 drbd_warn(device
, "Resumed without access to data; please tear down before attempting to re-configure.\n");
3231 clear_bit(NEW_CUR_UUID
, &device
->flags
);
3233 drbd_suspend_io(device
);
3234 retcode
= drbd_request_state(device
, NS3(susp
, 0, susp_nod
, 0, susp_fen
, 0));
3235 if (retcode
== SS_SUCCESS
) {
3236 if (device
->state
.conn
< C_CONNECTED
)
3237 tl_clear(first_peer_device(device
)->connection
);
3238 if (device
->state
.disk
== D_DISKLESS
|| device
->state
.disk
== D_FAILED
)
3239 tl_restart(first_peer_device(device
)->connection
, FAIL_FROZEN_DISK_IO
);
3241 drbd_resume_io(device
);
3242 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
3244 drbd_adm_finish(&adm_ctx
, info
, retcode
);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
3253 static int nla_put_drbd_cfg_context(struct sk_buff
*skb
,
3254 struct drbd_resource
*resource
,
3255 struct drbd_connection
*connection
,
3256 struct drbd_device
*device
)
3259 nla
= nla_nest_start_noflag(skb
, DRBD_NLA_CFG_CONTEXT
);
3261 goto nla_put_failure
;
3263 nla_put_u32(skb
, T_ctx_volume
, device
->vnr
))
3264 goto nla_put_failure
;
3265 if (nla_put_string(skb
, T_ctx_resource_name
, resource
->name
))
3266 goto nla_put_failure
;
3268 if (connection
->my_addr_len
&&
3269 nla_put(skb
, T_ctx_my_addr
, connection
->my_addr_len
, &connection
->my_addr
))
3270 goto nla_put_failure
;
3271 if (connection
->peer_addr_len
&&
3272 nla_put(skb
, T_ctx_peer_addr
, connection
->peer_addr_len
, &connection
->peer_addr
))
3273 goto nla_put_failure
;
3275 nla_nest_end(skb
, nla
);
3280 nla_nest_cancel(skb
, nla
);
/*
 * The generic netlink dump callbacks are called outside the genl_lock(), so
 * they cannot use the simple attribute parsing code which uses global
 * attributes.
 */
3289 static struct nlattr
*find_cfg_context_attr(const struct nlmsghdr
*nlh
, int attr
)
3291 const unsigned hdrlen
= GENL_HDRLEN
+ GENL_MAGIC_FAMILY_HDRSZ
;
3292 const int maxtype
= ARRAY_SIZE(drbd_cfg_context_nl_policy
) - 1;
3295 nla
= nla_find(nlmsg_attrdata(nlh
, hdrlen
), nlmsg_attrlen(nlh
, hdrlen
),
3296 DRBD_NLA_CFG_CONTEXT
);
3299 return drbd_nla_find_nested(maxtype
, nla
, __nla_type(attr
));
3302 static void resource_to_info(struct resource_info
*, struct drbd_resource
*);
3304 int drbd_adm_dump_resources(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3306 struct drbd_genlmsghdr
*dh
;
3307 struct drbd_resource
*resource
;
3308 struct resource_info resource_info
;
3309 struct resource_statistics resource_statistics
;
3314 for_each_resource_rcu(resource
, &drbd_resources
)
3315 if (resource
== (struct drbd_resource
*)cb
->args
[0])
3316 goto found_resource
;
3317 err
= 0; /* resource was probably deleted */
3320 resource
= list_entry(&drbd_resources
,
3321 struct drbd_resource
, resources
);
3324 list_for_each_entry_continue_rcu(resource
, &drbd_resources
, resources
) {
3331 dh
= genlmsg_put(skb
, NETLINK_CB(cb
->skb
).portid
,
3332 cb
->nlh
->nlmsg_seq
, &drbd_genl_family
,
3333 NLM_F_MULTI
, DRBD_ADM_GET_RESOURCES
);
3338 dh
->ret_code
= NO_ERROR
;
3339 err
= nla_put_drbd_cfg_context(skb
, resource
, NULL
, NULL
);
3342 err
= res_opts_to_skb(skb
, &resource
->res_opts
, !capable(CAP_SYS_ADMIN
));
3345 resource_to_info(&resource_info
, resource
);
3346 err
= resource_info_to_skb(skb
, &resource_info
, !capable(CAP_SYS_ADMIN
));
3349 resource_statistics
.res_stat_write_ordering
= resource
->write_ordering
;
3350 err
= resource_statistics_to_skb(skb
, &resource_statistics
, !capable(CAP_SYS_ADMIN
));
3353 cb
->args
[0] = (long)resource
;
3354 genlmsg_end(skb
, dh
);
3364 static void device_to_statistics(struct device_statistics
*s
,
3365 struct drbd_device
*device
)
3367 memset(s
, 0, sizeof(*s
));
3368 s
->dev_upper_blocked
= !may_inc_ap_bio(device
);
3369 if (get_ldev(device
)) {
3370 struct drbd_md
*md
= &device
->ldev
->md
;
3371 u64
*history_uuids
= (u64
*)s
->history_uuids
;
3372 struct request_queue
*q
;
3375 spin_lock_irq(&md
->uuid_lock
);
3376 s
->dev_current_uuid
= md
->uuid
[UI_CURRENT
];
3377 BUILD_BUG_ON(sizeof(s
->history_uuids
) < UI_HISTORY_END
- UI_HISTORY_START
+ 1);
3378 for (n
= 0; n
< UI_HISTORY_END
- UI_HISTORY_START
+ 1; n
++)
3379 history_uuids
[n
] = md
->uuid
[UI_HISTORY_START
+ n
];
3380 for (; n
< HISTORY_UUIDS
; n
++)
3381 history_uuids
[n
] = 0;
3382 s
->history_uuids_len
= HISTORY_UUIDS
;
3383 spin_unlock_irq(&md
->uuid_lock
);
3385 s
->dev_disk_flags
= md
->flags
;
3386 q
= bdev_get_queue(device
->ldev
->backing_bdev
);
3387 s
->dev_lower_blocked
=
3388 bdi_congested(q
->backing_dev_info
,
3389 (1 << WB_async_congested
) |
3390 (1 << WB_sync_congested
));
3393 s
->dev_size
= drbd_get_capacity(device
->this_bdev
);
3394 s
->dev_read
= device
->read_cnt
;
3395 s
->dev_write
= device
->writ_cnt
;
3396 s
->dev_al_writes
= device
->al_writ_cnt
;
3397 s
->dev_bm_writes
= device
->bm_writ_cnt
;
3398 s
->dev_upper_pending
= atomic_read(&device
->ap_bio_cnt
);
3399 s
->dev_lower_pending
= atomic_read(&device
->local_cnt
);
3400 s
->dev_al_suspended
= test_bit(AL_SUSPENDED
, &device
->flags
);
3401 s
->dev_exposed_data_uuid
= device
->ed_uuid
;
3404 static int put_resource_in_arg0(struct netlink_callback
*cb
, int holder_nr
)
3407 struct drbd_resource
*resource
=
3408 (struct drbd_resource
*)cb
->args
[0];
3409 kref_put(&resource
->kref
, drbd_destroy_resource
);
int drbd_adm_dump_devices_done(struct netlink_callback *cb) {
	return put_resource_in_arg0(cb, 7);
}
3419 static void device_to_info(struct device_info
*, struct drbd_device
*);
3421 int drbd_adm_dump_devices(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3423 struct nlattr
*resource_filter
;
3424 struct drbd_resource
*resource
;
3425 struct drbd_device
*uninitialized_var(device
);
3426 int minor
, err
, retcode
;
3427 struct drbd_genlmsghdr
*dh
;
3428 struct device_info device_info
;
3429 struct device_statistics device_statistics
;
3430 struct idr
*idr_to_search
;
3432 resource
= (struct drbd_resource
*)cb
->args
[0];
3433 if (!cb
->args
[0] && !cb
->args
[1]) {
3434 resource_filter
= find_cfg_context_attr(cb
->nlh
, T_ctx_resource_name
);
3435 if (resource_filter
) {
3436 retcode
= ERR_RES_NOT_KNOWN
;
3437 resource
= drbd_find_resource(nla_data(resource_filter
));
3440 cb
->args
[0] = (long)resource
;
3445 minor
= cb
->args
[1];
3446 idr_to_search
= resource
? &resource
->devices
: &drbd_devices
;
3447 device
= idr_get_next(idr_to_search
, &minor
);
3452 idr_for_each_entry_continue(idr_to_search
, device
, minor
) {
3454 goto put_result
; /* only one iteration */
3457 goto out
; /* no more devices */
3460 dh
= genlmsg_put(skb
, NETLINK_CB(cb
->skb
).portid
,
3461 cb
->nlh
->nlmsg_seq
, &drbd_genl_family
,
3462 NLM_F_MULTI
, DRBD_ADM_GET_DEVICES
);
3466 dh
->ret_code
= retcode
;
3468 if (retcode
== NO_ERROR
) {
3469 dh
->minor
= device
->minor
;
3470 err
= nla_put_drbd_cfg_context(skb
, device
->resource
, NULL
, device
);
3473 if (get_ldev(device
)) {
3474 struct disk_conf
*disk_conf
=
3475 rcu_dereference(device
->ldev
->disk_conf
);
3477 err
= disk_conf_to_skb(skb
, disk_conf
, !capable(CAP_SYS_ADMIN
));
3482 device_to_info(&device_info
, device
);
3483 err
= device_info_to_skb(skb
, &device_info
, !capable(CAP_SYS_ADMIN
));
3487 device_to_statistics(&device_statistics
, device
);
3488 err
= device_statistics_to_skb(skb
, &device_statistics
, !capable(CAP_SYS_ADMIN
));
3491 cb
->args
[1] = minor
+ 1;
3493 genlmsg_end(skb
, dh
);
int drbd_adm_dump_connections_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 6);
}
3508 enum { SINGLE_RESOURCE
, ITERATE_RESOURCES
};
3510 int drbd_adm_dump_connections(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3512 struct nlattr
*resource_filter
;
3513 struct drbd_resource
*resource
= NULL
, *next_resource
;
3514 struct drbd_connection
*uninitialized_var(connection
);
3515 int err
= 0, retcode
;
3516 struct drbd_genlmsghdr
*dh
;
3517 struct connection_info connection_info
;
3518 struct connection_statistics connection_statistics
;
3521 resource
= (struct drbd_resource
*)cb
->args
[0];
3523 resource_filter
= find_cfg_context_attr(cb
->nlh
, T_ctx_resource_name
);
3524 if (resource_filter
) {
3525 retcode
= ERR_RES_NOT_KNOWN
;
3526 resource
= drbd_find_resource(nla_data(resource_filter
));
3529 cb
->args
[0] = (long)resource
;
3530 cb
->args
[1] = SINGLE_RESOURCE
;
3534 if (list_empty(&drbd_resources
))
3536 resource
= list_first_entry(&drbd_resources
, struct drbd_resource
, resources
);
3537 kref_get(&resource
->kref
);
3538 cb
->args
[0] = (long)resource
;
3539 cb
->args
[1] = ITERATE_RESOURCES
;
3544 mutex_lock(&resource
->conf_update
);
3547 for_each_connection_rcu(connection
, resource
)
3548 if (connection
== (struct drbd_connection
*)cb
->args
[2])
3549 goto found_connection
;
3550 /* connection was probably deleted */
3551 goto no_more_connections
;
3553 connection
= list_entry(&resource
->connections
, struct drbd_connection
, connections
);
3556 list_for_each_entry_continue_rcu(connection
, &resource
->connections
, connections
) {
3557 if (!has_net_conf(connection
))
3560 goto put_result
; /* only one iteration */
3563 no_more_connections
:
3564 if (cb
->args
[1] == ITERATE_RESOURCES
) {
3565 for_each_resource_rcu(next_resource
, &drbd_resources
) {
3566 if (next_resource
== resource
)
3567 goto found_resource
;
3569 /* resource was probably deleted */
3574 list_for_each_entry_continue_rcu(next_resource
, &drbd_resources
, resources
) {
3575 mutex_unlock(&resource
->conf_update
);
3576 kref_put(&resource
->kref
, drbd_destroy_resource
);
3577 resource
= next_resource
;
3578 kref_get(&resource
->kref
);
3579 cb
->args
[0] = (long)resource
;
3583 goto out
; /* no more resources */
3586 dh
= genlmsg_put(skb
, NETLINK_CB(cb
->skb
).portid
,
3587 cb
->nlh
->nlmsg_seq
, &drbd_genl_family
,
3588 NLM_F_MULTI
, DRBD_ADM_GET_CONNECTIONS
);
3592 dh
->ret_code
= retcode
;
3594 if (retcode
== NO_ERROR
) {
3595 struct net_conf
*net_conf
;
3597 err
= nla_put_drbd_cfg_context(skb
, resource
, connection
, NULL
);
3600 net_conf
= rcu_dereference(connection
->net_conf
);
3602 err
= net_conf_to_skb(skb
, net_conf
, !capable(CAP_SYS_ADMIN
));
3606 connection_to_info(&connection_info
, connection
);
3607 err
= connection_info_to_skb(skb
, &connection_info
, !capable(CAP_SYS_ADMIN
));
3610 connection_statistics
.conn_congested
= test_bit(NET_CONGESTED
, &connection
->flags
);
3611 err
= connection_statistics_to_skb(skb
, &connection_statistics
, !capable(CAP_SYS_ADMIN
));
3614 cb
->args
[2] = (long)connection
;
3616 genlmsg_end(skb
, dh
);
3622 mutex_unlock(&resource
->conf_update
);
enum mdf_peer_flag {
	MDF_PEER_CONNECTED	= 1 << 0,
	MDF_PEER_OUTDATED	= 1 << 1,
	MDF_PEER_FENCING	= 1 << 2,
	MDF_PEER_FULL_SYNC	= 1 << 3,
};
3635 static void peer_device_to_statistics(struct peer_device_statistics
*s
,
3636 struct drbd_peer_device
*peer_device
)
3638 struct drbd_device
*device
= peer_device
->device
;
3640 memset(s
, 0, sizeof(*s
));
3641 s
->peer_dev_received
= device
->recv_cnt
;
3642 s
->peer_dev_sent
= device
->send_cnt
;
3643 s
->peer_dev_pending
= atomic_read(&device
->ap_pending_cnt
) +
3644 atomic_read(&device
->rs_pending_cnt
);
3645 s
->peer_dev_unacked
= atomic_read(&device
->unacked_cnt
);
3646 s
->peer_dev_out_of_sync
= drbd_bm_total_weight(device
) << (BM_BLOCK_SHIFT
- 9);
3647 s
->peer_dev_resync_failed
= device
->rs_failed
<< (BM_BLOCK_SHIFT
- 9);
3648 if (get_ldev(device
)) {
3649 struct drbd_md
*md
= &device
->ldev
->md
;
3651 spin_lock_irq(&md
->uuid_lock
);
3652 s
->peer_dev_bitmap_uuid
= md
->uuid
[UI_BITMAP
];
3653 spin_unlock_irq(&md
->uuid_lock
);
3655 (drbd_md_test_flag(device
->ldev
, MDF_CONNECTED_IND
) ?
3656 MDF_PEER_CONNECTED
: 0) +
3657 (drbd_md_test_flag(device
->ldev
, MDF_CONSISTENT
) &&
3658 !drbd_md_test_flag(device
->ldev
, MDF_WAS_UP_TO_DATE
) ?
3659 MDF_PEER_OUTDATED
: 0) +
3660 /* FIXME: MDF_PEER_FENCING? */
3661 (drbd_md_test_flag(device
->ldev
, MDF_FULL_SYNC
) ?
3662 MDF_PEER_FULL_SYNC
: 0);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 9);
}
3672 int drbd_adm_dump_peer_devices(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3674 struct nlattr
*resource_filter
;
3675 struct drbd_resource
*resource
;
3676 struct drbd_device
*uninitialized_var(device
);
3677 struct drbd_peer_device
*peer_device
= NULL
;
3678 int minor
, err
, retcode
;
3679 struct drbd_genlmsghdr
*dh
;
3680 struct idr
*idr_to_search
;
3682 resource
= (struct drbd_resource
*)cb
->args
[0];
3683 if (!cb
->args
[0] && !cb
->args
[1]) {
3684 resource_filter
= find_cfg_context_attr(cb
->nlh
, T_ctx_resource_name
);
3685 if (resource_filter
) {
3686 retcode
= ERR_RES_NOT_KNOWN
;
3687 resource
= drbd_find_resource(nla_data(resource_filter
));
3691 cb
->args
[0] = (long)resource
;
3695 minor
= cb
->args
[1];
3696 idr_to_search
= resource
? &resource
->devices
: &drbd_devices
;
3697 device
= idr_find(idr_to_search
, minor
);
3702 device
= idr_get_next(idr_to_search
, &minor
);
3709 for_each_peer_device(peer_device
, device
)
3710 if (peer_device
== (struct drbd_peer_device
*)cb
->args
[2])
3711 goto found_peer_device
;
3712 /* peer device was probably deleted */
3715 /* Make peer_device point to the list head (not the first entry). */
3716 peer_device
= list_entry(&device
->peer_devices
, struct drbd_peer_device
, peer_devices
);
3719 list_for_each_entry_continue_rcu(peer_device
, &device
->peer_devices
, peer_devices
) {
3720 if (!has_net_conf(peer_device
->connection
))
3723 goto put_result
; /* only one iteration */
3728 dh
= genlmsg_put(skb
, NETLINK_CB(cb
->skb
).portid
,
3729 cb
->nlh
->nlmsg_seq
, &drbd_genl_family
,
3730 NLM_F_MULTI
, DRBD_ADM_GET_PEER_DEVICES
);
3734 dh
->ret_code
= retcode
;
3736 if (retcode
== NO_ERROR
) {
3737 struct peer_device_info peer_device_info
;
3738 struct peer_device_statistics peer_device_statistics
;
3741 err
= nla_put_drbd_cfg_context(skb
, device
->resource
, peer_device
->connection
, device
);
3744 peer_device_to_info(&peer_device_info
, peer_device
);
3745 err
= peer_device_info_to_skb(skb
, &peer_device_info
, !capable(CAP_SYS_ADMIN
));
3748 peer_device_to_statistics(&peer_device_statistics
, peer_device
);
3749 err
= peer_device_statistics_to_skb(skb
, &peer_device_statistics
, !capable(CAP_SYS_ADMIN
));
3752 cb
->args
[1] = minor
;
3753 cb
->args
[2] = (long)peer_device
;
3755 genlmsg_end(skb
, dh
);
/*
 * Return the connection of @resource if @resource has exactly one connection.
 */
static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
{
	struct list_head *connections = &resource->connections;

	if (list_empty(connections) || connections->next->next != connections)
		return NULL;

	return list_first_entry(&resource->connections, struct drbd_connection, connections);
}
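/*
 * The list test above is the classic "exactly one entry" idiom: the list is
 * not empty and the first entry's next pointer already points back at the
 * list head.
 */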
static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
		const struct sib_info *sib)
{
	struct drbd_resource *resource = device->resource;
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(device);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev) {
		struct disk_conf *disk_conf;

		disk_conf = rcu_dereference(device->ldev->disk_conf);
		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
	}
	if (!err) {
		struct net_conf *nc;

		nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
		if (nc)
			err = net_conf_to_skb(skb, nc, exclude_sensitive);
	}
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
	    nla_put_u32(skb, T_current_state, device->state.i) ||
	    nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
	    nla_put_u64_0pad(skb, T_capacity,
			     drbd_get_capacity(device->this_bdev)) ||
	    nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
	    nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
	    nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
	    nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
	    nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
	    nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
		goto nla_put_failure;

	if (got_ldev) {
		int err;

		spin_lock_irq(&device->ldev->md.uuid_lock);
		err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
		spin_unlock_irq(&device->ldev->md.uuid_lock);

		if (err)
			goto nla_put_failure;

		if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
		    nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
		    nla_put_u64_0pad(skb, T_bits_oos,
				     drbd_bm_total_weight(device)))
			goto nla_put_failure;
		if (C_SYNC_SOURCE <= device->state.conn &&
		    C_PAUSED_SYNC_T >= device->state.conn) {
			if (nla_put_u64_0pad(skb, T_bits_rs_total,
					     device->rs_total) ||
			    nla_put_u64_0pad(skb, T_bits_rs_failed,
					     device->rs_failed))
				goto nla_put_failure;
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
			break;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
			/* fall through */
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(device);
	return err;
}

int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_device *device;
	struct drbd_genlmsghdr *dh;
	struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
	struct drbd_resource *resource = NULL;
	struct drbd_resource *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * for_each_resource_safe(resource, tmp, &drbd_resources) {
	 *	connection = "first connection of resource or undefined";
	 *	idr_for_each_entry(&resource->devices, device, i) {
	 *	  ...
	 *	}
	 * }
	 * where resource is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the device won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/drbd_destroy_connection() */
	rcu_read_lock();
	/* revalidate iterator position */
	for_each_resource_rcu(tmp, &drbd_resources) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			resource = pos;
			break;
		}
		if (tmp == pos) {
			resource = pos;
			break;
		}
	}
	if (resource) {
next_resource:
		device = idr_get_next(&resource->devices, &volume);
		if (!device) {
			/* No more volumes to dump on this resource.
			 * Advance resource iterator. */
			pos = list_entry_rcu(resource->resources.next,
					     struct drbd_resource, resources);
			/* Did we dump any volume of this resource yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->resources == &drbd_resources || cb->args[2])
					goto out;
				volume = 0;
				resource = pos;
				goto next_resource;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!device) {
			/* This is a connection without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct drbd_connection *connection;

			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			connection = the_only_connection(resource);
			if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
				goto cancel;
			if (connection) {
				struct net_conf *nc;

				nc = rcu_dereference(connection->net_conf);
				if (nc && net_conf_to_skb(skb, nc, 1) != 0)
					goto cancel;
			}
			goto done;
		}

		D_ASSERT(device, device->vnr == volume);
		D_ASSERT(device, device->resource == resource);

		dh->minor = device_to_minor(device);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, device, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == resource) ? volume + 1 : 0;

	/* No more resources/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}

/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_resource *resource;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		return -EINVAL;
	resource_name = nla_data(nla);
	if (!*resource_name)
		return -ENODEV;
	resource = drbd_find_resource(resource_name);
	if (!resource)
		return -ENODEV;

	kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this connection. */
	cb->args[0] = (long)resource;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)resource;

dump:
	return get_one_status(skb, cb);
}

int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx.device;

	/* resume from last known position, if possible */
	parms.ov_start_sector = device->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* w_make_ov_request expects position to be aligned */
	device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	device->ov_stop_sector = parms.ov_stop_sector;

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
	drbd_resume_io(device);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	device = adm_ctx.device;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (device->state.conn == C_CONNECTED &&
	    first_peer_device(device)->connection->agreed_pro_version >= 90 &&
	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		drbd_info(device, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (device->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			drbd_err(device, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(first_peer_device(device));
			_drbd_uuid_set(device, UI_BITMAP, 0);
			drbd_print_uuids(device, "cleared bitmap UUID");
			spin_lock_irq(&device->resource->req_lock);
			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&device->resource->req_lock);
		}
	}

	drbd_md_sync(device);
out_dec:
	put_ldev(device);
out:
	mutex_unlock(device->state_mutex);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out_nolock:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static enum drbd_ret_code
drbd_check_resource_name(struct drbd_config_context *adm_ctx)
{
	const char *name = adm_ctx->resource_name;
	if (!name || !name[0]) {
		drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}

static void resource_to_info(struct resource_info *info,
			     struct drbd_resource *resource)
{
	info->res_role = conn_highest_role(first_connection(resource));
	info->res_susp = resource->susp;
	info->res_susp_nod = resource->susp_nod;
	info->res_susp_fen = resource->susp_fen;
}

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_connection *connection;
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(&adm_ctx);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.resource) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	/* not yet safe for genl_family.parallel_ops */
	mutex_lock(&resources_mutex);
	connection = conn_create(adm_ctx.resource_name, &res_opts);
	mutex_unlock(&resources_mutex);

	if (connection) {
		struct resource_info resource_info;

		mutex_lock(&notification_mutex);
		resource_to_info(&resource_info, connection->resource);
		notify_resource_state(NULL, 0, connection->resource,
				      &resource_info, NOTIFY_CREATE);
		mutex_unlock(&notification_mutex);
	} else
		retcode = ERR_NOMEM;

out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static void device_to_info(struct device_info *info,
			   struct drbd_device *device)
{
	info->dev_disk_state = device->state.disk;
}

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that first_peer_device(device)->connection and device->vnr match the request. */
	if (adm_ctx.device) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = drbd_create_device(&adm_ctx, dh->minor);
	if (retcode == NO_ERROR) {
		struct drbd_device *device;
		struct drbd_peer_device *peer_device;
		struct device_info info;
		unsigned int peer_devices = 0;
		enum drbd_notification_type flags;

		device = minor_to_device(dh->minor);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			peer_devices++;
		}

		device_to_info(&info, device);
		mutex_lock(&notification_mutex);
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
		for_each_peer_device(peer_device, device) {
			struct peer_device_info peer_device_info;

			if (!has_net_conf(peer_device->connection))
				continue;
			peer_device_to_info(&peer_device_info, peer_device);
			flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
			notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
						 NOTIFY_CREATE | flags);
		}
		mutex_unlock(&notification_mutex);
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static enum drbd_ret_code adm_del_minor(struct drbd_device *device)
{
	struct drbd_peer_device *peer_device;

	if (device->state.disk == D_DISKLESS &&
	    /* no need to be device->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    device->state.role == R_SECONDARY) {
		struct drbd_connection *connection =
			first_connection(device->resource);

		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);

		/* If the state engine hasn't stopped the sender thread yet, we
		 * need to flush the sender work queue before generating the
		 * DESTROY events here. */
		if (get_t_state(&connection->worker) == RUNNING)
			drbd_flush_workqueue(&connection->sender_work);

		mutex_lock(&notification_mutex);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			notify_peer_device_state(NULL, 0, peer_device, NULL,
						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
		}
		notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
		mutex_unlock(&notification_mutex);

		drbd_delete_device(device);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}

int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = adm_del_minor(adm_ctx.device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static int adm_del_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection;

	for_each_connection(connection, resource) {
		if (connection->cstate > C_STANDALONE)
			return ERR_NET_CONFIGURED;
	}
	if (!idr_is_empty(&resource->devices))
		return ERR_RES_IN_USE;

	/* The state engine has stopped the sender thread, so we don't
	 * need to flush the sender work queue before generating the
	 * DESTROY event here. */
	mutex_lock(&notification_mutex);
	notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
	mutex_unlock(&notification_mutex);

	mutex_lock(&resources_mutex);
	list_del_rcu(&resource->resources);
	mutex_unlock(&resources_mutex);
	/* Make sure all threads have actually stopped: state handling only
	 * does drbd_thread_stop_nowait(). */
	list_for_each_entry(connection, &resource->connections, connections)
		drbd_thread_stop(&connection->worker);

	drbd_free_resource(resource);
	return NO_ERROR;
}

int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	unsigned i;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	resource = adm_ctx.resource;
	mutex_lock(&resource->adm_mutex);
	/* demote */
	for_each_connection(connection, resource) {
		struct drbd_peer_device *peer_device;

		idr_for_each_entry(&connection->peer_devices, peer_device, i) {
			retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
			if (retcode < SS_SUCCESS) {
				drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
				goto out;
			}
		}

		retcode = conn_try_disconnect(connection, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
			goto out;
		}
	}

	/* detach */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_detach(device, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
			goto out;
		}
	}

	/* delete volumes */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_del_minor(device);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
			goto out;
		}
	}

	retcode = adm_del_resource(resource);
out:
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;
	resource = adm_ctx.resource;

	mutex_lock(&resource->adm_mutex);
	retcode = adm_del_resource(resource);
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
{
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = device_to_minor(device);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, device, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	drbd_err(device, "Error %d while broadcasting event. "
			 "Event seq:%u sib_reason:%u\n",
			 err, seq, sib->sib_reason);
}

static int nla_put_notification_header(struct sk_buff *msg,
				       enum drbd_notification_type type)
{
	struct drbd_notification_header nh = {
		.nh_type = type,
	};

	return drbd_notification_header_to_skb(msg, &nh, true);
}

void notify_resource_state(struct sk_buff *skb,
			   unsigned int seq,
			   struct drbd_resource *resource,
			   struct resource_info *resource_info,
			   enum drbd_notification_type type)
{
	struct resource_statistics resource_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     resource_info_to_skb(skb, resource_info, true)))
		goto nla_put_failure;
	resource_statistics.res_stat_write_ordering = resource->write_ordering;
	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
	if (err)
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

void notify_device_state(struct sk_buff *skb,
			 unsigned int seq,
			 struct drbd_device *device,
			 struct device_info *device_info,
			 enum drbd_notification_type type)
{
	struct device_statistics device_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = device->minor;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     device_info_to_skb(skb, device_info, true)))
		goto nla_put_failure;
	device_to_statistics(&device_statistics, device);
	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

void notify_connection_state(struct sk_buff *skb,
			     unsigned int seq,
			     struct drbd_connection *connection,
			     struct connection_info *connection_info,
			     enum drbd_notification_type type)
{
	struct connection_statistics connection_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     connection_info_to_skb(skb, connection_info, true)))
		goto nla_put_failure;
	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

void notify_peer_device_state(struct sk_buff *skb,
			      unsigned int seq,
			      struct drbd_peer_device *peer_device,
			      struct peer_device_info *peer_device_info,
			      enum drbd_notification_type type)
{
	struct peer_device_statistics peer_device_statistics;
	struct drbd_resource *resource = peer_device->device->resource;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     peer_device_info_to_skb(skb, peer_device_info, true)))
		goto nla_put_failure;
	peer_device_to_statistics(&peer_device_statistics, peer_device);
	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

void notify_helper(enum drbd_notification_type type,
		   struct drbd_device *device, struct drbd_connection *connection,
		   const char *name, int status)
{
	struct drbd_resource *resource = device ? device->resource : connection->resource;
	struct drbd_helper_info helper_info;
	unsigned int seq = atomic_inc_return(&notify_genl_seq);
	struct sk_buff *skb = NULL;
	struct drbd_genlmsghdr *dh;
	int err;

	strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
	helper_info.helper_status = status;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	err = -ENOMEM;
	if (!skb)
		goto fail;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
	if (!dh)
		goto fail;
	dh->minor = device ? device->minor : -1;
	dh->ret_code = NO_ERROR;
	mutex_lock(&notification_mutex);
	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
	    nla_put_notification_header(skb, type) ||
	    drbd_helper_info_to_skb(skb, &helper_info, true))
		goto unlock_fail;
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
	skb = NULL;
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto unlock_fail;
	mutex_unlock(&notification_mutex);
	return;

unlock_fail:
	mutex_unlock(&notification_mutex);
fail:
	nlmsg_free(skb);
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}

static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
	struct drbd_genlmsghdr *dh;
	int err;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
	if (!dh)
		goto nla_put_failure;

	dh->ret_code = NO_ERROR;
	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	return;

nla_put_failure:
	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
}

static void free_state_changes(struct list_head *list)
{
	while (!list_empty(list)) {
		struct drbd_state_change *state_change =
			list_first_entry(list, struct drbd_state_change, list);
		list_del(&state_change->list);
		forget_state_change(state_change);
	}
}

static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
{
	return 1 +
	       state_change->n_connections +
	       state_change->n_devices +
	       state_change->n_devices * state_change->n_connections;
}

static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
	unsigned int seq = cb->args[2];
	unsigned int n;
	enum drbd_notification_type flags = 0;

	/* There is no need for taking notification_mutex here:  it doesn't
	   matter if the initial state events mix with later state change
	   events; we can always tell the events apart by the NOTIFY_EXISTS
	   flag. */

	cb->args[5]--;
	if (cb->args[5] == 1) {
		notify_initial_state_done(skb, seq);
		goto out;
	}
	n = cb->args[4]++;
	if (cb->args[4] < cb->args[3])
		flags |= NOTIFY_CONTINUES;
	if (n < 1) {
		notify_resource_state_change(skb, seq, state_change->resource,
					     NOTIFY_EXISTS | flags);
		goto next;
	}
	n--;
	if (n < state_change->n_connections) {
		notify_connection_state_change(skb, seq, &state_change->connections[n],
					       NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_connections;
	if (n < state_change->n_devices) {
		notify_device_state_change(skb, seq, &state_change->devices[n],
					   NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_devices;
	if (n < state_change->n_devices * state_change->n_connections) {
		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
						NOTIFY_EXISTS | flags);
		goto next;
	}

next:
	if (cb->args[4] == cb->args[3]) {
		struct drbd_state_change *next_state_change =
			list_entry(state_change->list.next,
				   struct drbd_state_change, list);
		cb->args[0] = (long)next_state_change;
		cb->args[3] = notifications_for_state_change(next_state_change);
		cb->args[4] = 0;
	}
out:
	return skb->len;
}

int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_resource *resource;
	LIST_HEAD(head);

	if (cb->args[5] >= 1) {
		if (cb->args[5] > 1)
			return get_initial_state(skb, cb);
		if (cb->args[0]) {
			struct drbd_state_change *state_change =
				(struct drbd_state_change *)cb->args[0];

			/* connect list to head */
			list_add(&head, &state_change->list);
			free_state_changes(&head);
		}
		return 0;
	}

	cb->args[5] = 2;  /* number of iterations */
	mutex_lock(&resources_mutex);
	for_each_resource(resource, &drbd_resources) {
		struct drbd_state_change *state_change;

		state_change = remember_old_state(resource, GFP_KERNEL);
		if (!state_change) {
			if (!list_empty(&head))
				free_state_changes(&head);
			mutex_unlock(&resources_mutex);
			return -ENOMEM;
		}
		copy_old_to_new_state_change(state_change);
		list_add_tail(&state_change->list, &head);
		cb->args[5] += notifications_for_state_change(state_change);
	}
	mutex_unlock(&resources_mutex);

	if (!list_empty(&head)) {
		struct drbd_state_change *state_change =
			list_entry(head.next, struct drbd_state_change, list);
		cb->args[0] = (long)state_change;
		cb->args[3] = notifications_for_state_change(state_change);
		list_del(&head);  /* detach list from head */
	}

	cb->args[2] = cb->nlh->nlmsg_seq;
	return get_initial_state(skb, cb);
}