// SPDX-License-Identifier: GPL-2.0-or-later
/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_protocol.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>
#include <net/genetlink.h>

// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */
DEFINE_MUTEX(notification_mutex);

/* used with blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");

/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail would be no space in the skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
	if (!info || !info[0])

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	err = nla_put_string(skb, T_info_text, info);
		nla_nest_cancel(skb, nla);
	nla_nest_end(skb, nla);
static int drbd_msg_sprintf_info(struct sk_buff *skb, const char *fmt, ...)
	struct nlattr *nla, *txt;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_CFG_REPLY);
	txt = nla_reserve(skb, T_info_text, 256);
		nla_nest_cancel(skb, nla);
	len = vscnprintf(nla_data(txt), 256, fmt, args);

	/* maybe: retry with larger reserve, if truncated */
	txt->nla_len = nla_attr_size(len+1);
	nlmsg_trim(skb, (char *)txt + NLA_ALIGN(txt->nla_len));
	nla_nest_end(skb, nla);
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
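/* These bits are OR'ed together into the "flags" argument of drbd_adm_prepare()
 * below; e.g. drbd_adm_set_role(), drbd_adm_disk_opts() and drbd_adm_attach()
 * later in this file all request DRBD_ADM_NEED_MINOR, so when drbd_adm_prepare()
 * returns NO_ERROR they can rely on adm_ctx.device being valid. */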
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed. */
	if (!adm_ctx->reply_dh) {

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name) {
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							nla_len(adm_ctx->my_addr),
							nla_data(adm_ctx->peer_addr),
							nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warn("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
			adm_ctx->minor, adm_ctx->resource->name,
			adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warn("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
			adm_ctx->minor, adm_ctx->volume,
			adm_ctx->device->vnr, adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);

	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
		adm_ctx->connection = NULL;
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;

	if (!adm_ctx->reply_skb)

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
int drbd_khelper(struct drbd_device *device, char *cmd)
	char *envp[] = { "HOME=/",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			(char[20]) { }, /* address family */
			(char[60]) { }, /* address */
	char *argv[] = {drbd_usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 14, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, mb,
			  (ret >> 8) & 0xff, ret);
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, mb,
			  (ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
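	/* call_usermodehelper(..., UMH_WAIT_PROC) returns a wait()-style status:
	 * the helper's exit code lives in bits 8..15, which is what
	 * "(ret >> 8) & 0xff" above extracts.  E.g. a helper exiting with
	 * status 5 arrives here as ret == 0x500 and is logged as exit code 5. */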
enum drbd_peer_state conn_khelper(struct drbd_connection *connection, char *cmd)
	char *envp[] = { "HOME=/",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			(char[20]) { }, /* address family */
			(char[60]) { }, /* address */
	char *resource_name = connection->resource->name;
	char *argv[] = {drbd_usermode_helper, cmd, resource_name, NULL };

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", drbd_usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(drbd_usermode_helper, argv, envp, UMH_WAIT_PROC);
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  drbd_usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;

	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);

static bool resource_is_suspended(struct drbd_resource *resource)
	return resource->susp || resource->susp_fen || resource->susp_nod;
bool conn_try_outdate_peer(struct drbd_connection *connection)
	struct drbd_resource * const resource = connection->resource;
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;

	spin_lock_irq(&resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&resource->req_lock);

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&resource->req_lock);

	fp = highest_fencing_policy(connection);
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		spin_lock_irq(&resource->req_lock);
		if (connection->cstate < C_WF_REPORT_PARAMS) {
			_conn_request_state(connection,
					    (union drbd_state) { { .susp_fen = 1 } },
					    (union drbd_state) { { .susp_fen = 0 } },
					    CS_VERBOSE | CS_HARD | CS_DC_SUSP);
			/* We are no longer suspended due to the fencing policy.
			 * We may still be suspended due to the on-no-data-accessible policy.
			 * If that was OND_IO_ERROR, fail pending requests. */
			if (!resource_is_suspended(resource))
				_tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);

		/* Else: in case we raced with a connection handshake,
		 * let the handshake figure out if we maybe can RESEND,
		 * and do not resume/fail pending requests here.
		 * Worst case is we stay suspended for now, which may be
		 * resolved by either re-establishing the replication link, or
		 * the next link failure, or eventually the administrator. */
		spin_unlock_irq(&resource->req_lock);

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case P_INCONSISTENT: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		val.pdsk = D_INCONSISTENT;
	case P_OUTDATED: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		val.pdsk = D_OUTDATED;
	case P_DOWN: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			val.pdsk = D_OUTDATED;
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
	case P_PRIMARY: /* Peer is primary, voluntarily outdate myself.
			 * This is useful when an unconnected R_SECONDARY is asked to
			 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		val.disk = D_OUTDATED;
		/* THINK: do we need to handle this
		 * like case 4, or more like case 5? */
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		val.pdsk = D_OUTDATED;
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the connection in the
	   meantime. */
	spin_lock_irq(&resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
			_conn_request_state(connection, mask, val, CS_VERBOSE);

	spin_unlock_irq(&resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
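/* Summary: conn_try_outdate_peer() answers "is the peer known to be outdated
 * (or worse)?" -- see the return statement above.  Callers such as
 * drbd_set_role() below use that answer to decide whether it is safe to claim
 * the local disk as D_UP_TO_DATE when promoting. */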
static int _try_outdate_peer_async(void *data)
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);

void conn_try_outdate_peer_async(struct drbd_connection *connection)
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may have just sent a signal to this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_for_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible. */
		for_each_connection(connection, device->resource)
			request_ping(connection);

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			val.disk = D_UP_TO_DATE;

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;

		if (rv == SS_NOTHING_TO_DO)
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				val.pdsk = D_OUTDATED;

		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			if (try < max_tries) {
				nc = rcu_dereference(connection->net_conf);
				timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
				schedule_timeout_interruptible(timeo);
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);

		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			      device->state.pdsk <= D_FAILED)
			     && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |= (u64)1;

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);

	mutex_unlock(device->state_mutex);
static const char *from_attrs_err_to_txt(int err)
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
		retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
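/* Presumed mapping to the admin tool: "drbdsetup primary <minor>" arrives here
 * as DRBD_ADM_PRIMARY (with --force being what sets parms.assume_uptodate),
 * while "drbdsetup secondary <minor>" takes the R_SECONDARY branch above. */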
/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  Activity log size used to be fixed 32kB,
 *  but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
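/* Rough rule of thumb for the internal/flexible-internal case above: the
 * bitmap tracks one bit per 4 KiB of backing storage, i.e. about 32 KiB of
 * bitmap per 1 GiB of data, so e.g. a 1 TiB backing device ends up with
 * roughly 32 MiB of bitmap plus the 4 KiB superblock and the activity log. */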
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	while (size >= 10000 && base < sizeof(units)-1) {
		size = (size >> 10) + !!(size & (1<<9));
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);
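/* Example: ppsize(buf, 1048576) renders "1024 MB" -- 1048576 KB is >= 10000,
 * so it is shifted down once (rounding on bit 9) to 1024 with unit 'M'. */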
/* there is still a theoretical deadlock when called from receiver
 * on a D_INCONSISTENT R_PRIMARY:
 * remote READ does inc_ap_bio, receiver would need to receive answer
 * packet from remote to dec_ap_bio again.
 * receiver receive_sizes(), comes here,
 * waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 * R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 * (not connected, or bad/no disk on peer):
 * see drbd_fail_request_early, ap_bio_cnt is zero.
 * R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 * peer may not initiate a resize. */

/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes a device->flag, is triggered by drbd internals,
 * and should be short-lived. */
/* It needs to be a counter, since multiple threads might
   independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
	atomic_inc(&device->suspend_cnt);
	if (drbd_suspended(device))
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));

void drbd_resume_io(struct drbd_device *device)
	if (atomic_dec_and_test(&device->suspend_cnt))
		wake_up(&device->misc_wait);
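/* Typical pairing, as in drbd_determine_dev_size() below:
 *
 *	drbd_suspend_io(device);
 *	... rearrange on-disk meta data ...
 *	drbd_resume_io(device);
 *
 * Because suspend_cnt is a counter, nested users are fine as long as every
 * suspend is matched by exactly one resume. */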
/**
 * drbd_determine_dev_size() - Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
	struct md_offsets_and_sizes {
		u64 last_agreed_sect;
		u32 al_stripe_size_4k;
	sector_t u_size, size;
	struct drbd_md *md = &device->ldev->md;
	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* We may change the on-disk offsets of our meta data below. Lock out
	 * anything that may cause meta data IO, to avoid acting on incomplete
	 * layout changes or scribbling over meta data that is in the process
	 * of being changed.
	 *
	 * Move is not exactly correct, btw, currently we have all our meta
	 * data in core memory, to "move" it we just write it all out, there
	 * are no reads. */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
		drbd_resume_io(device);

	/* remember current offset and sizes */
	prev.last_agreed_sect = md->la_size_sect;
	prev.md_offset = md->md_offset;
	prev.al_offset = md->al_offset;
	prev.bm_offset = md->bm_offset;
	prev.md_size_sect = md->md_size_sect;
	prev.al_stripes = md->al_stripes;
	prev.al_stripe_size_4k = md->al_stripe_size_4k;

	/* rs is non NULL if we should change the AL layout only */
		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;

	drbd_md_set_sector_offsets(device, device->ldev);

	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < prev.last_agreed_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				"Use --size=%llus for explicit shrink.\n",
				(unsigned long long)size);
			rv = DS_ERROR_SHRINK;
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)

	if (get_capacity(device->vdisk) != size ||
	    drbd_bm_capacity(device) != size) {
		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device);
				drbd_err(device, "OUT OF MEMORY! "
					"Could not allocate bitmap!\n");
				drbd_err(device, "BM resizing failed. "
					"Leaving size unchanged\n");
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		md->la_size_sect = size;

	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);

	md_moved = prev.md_offset != md->md_offset
		|| prev.md_size_sect != md->md_size_sect;

	if (la_size_changed || md_moved || rs) {
		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyways. */
		del_timer(&device->md_sync_timer);

		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Lock for transaction is good enough, it may well be "dirty"
		 * or even "starving". */
		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));

		/* mark current on-disk bitmap and activity log as unreliable */
		prev_flags = md->flags;
		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
		drbd_md_write(device, buffer);

		drbd_al_initialize(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK);

		/* on-disk bitmap and activity log is authoritative again
		 * (unless there was an IO error meanwhile...) */
		md->flags = prev_flags;
		drbd_md_write(device, buffer);

			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				 md->al_stripes, md->al_stripe_size_4k * 4);

	if (size > prev.last_agreed_sect)
		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < prev.last_agreed_sect)
		/* restore previous offset and sizes */
		md->la_size_sect = prev.last_agreed_sect;
		md->md_offset = prev.md_offset;
		md->al_offset = prev.al_offset;
		md->bm_offset = prev.bm_offset;
		md->md_size_sect = prev.md_size_sect;
		md->al_stripes = prev.al_stripes;
		md->al_stripe_size_4k = prev.al_stripe_size_4k;
		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);
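/* Big picture of the function above: application IO is suspended, the meta
 * data offsets are recomputed for the (possibly) new size, the in-memory
 * bitmap is resized, and only if the on-disk layout really changed are the
 * superblock, activity log and bitmap rewritten before IO is resumed. */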
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
		size = la_size_sect;
		if (m_size && m_size < size)
		if (p_size && p_size < size)

		drbd_err(device, "Both nodes diskless!\n");
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
				(unsigned long)u_size>>1, (unsigned long)size>>1);
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)

	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

		drbd_err(device, "Cannot allocate act_log lru!\n");
	spin_lock_irq(&device->al_lock);
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
				drbd_err(device, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
		drbd_err(device, "Activity log still in use!\n");
	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elements */
static void blk_queue_discard_granularity(struct request_queue *q, unsigned int granularity)
	q->limits.discard_granularity = granularity;

static unsigned int drbd_max_discard_sectors(struct drbd_connection *connection)
	/* when we introduced REQ_WRITE_SAME support, we also bumped
	 * our maximum supported batch bio size used for discards. */
	if (connection->agreed_features & DRBD_FF_WSAME)
		return DRBD_MAX_BBIO_SECTORS;
	/* before, with DRBD <= 8.4.6, we only allowed up to one AL_EXTENT_SIZE. */
	return AL_EXTENT_SIZE >> 9;
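/* With the usual 4 MiB AL_EXTENT_SIZE that fallback is 8192 sectors, i.e.
 * 4 MiB per discard request for old peers, whereas DRBD_FF_WSAME peers get
 * the much larger DRBD_MAX_BBIO_SECTORS batch size. */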
static void decide_on_discard_support(struct drbd_device *device,
			struct request_queue *q,
			struct request_queue *b,
			bool discard_zeroes_if_aligned)
	/* q = drbd device queue (device->rq_queue)
	 * b = backing device queue (device->ldev->backing_bdev->bd_disk->queue),
	 *     or NULL if diskless
	 */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	bool can_do = b ? blk_queue_discard(b) : true;

	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_TRIM)) {
		drbd_info(connection, "peer DRBD too old, does not support TRIM: disabling discards\n");

		/* We don't care for the granularity, really.
		 * Stacking limits below should fix it for the local
		 * device.  Whether or not it is a suitable granularity
		 * on the remote device is not our problem, really. If
		 * you care, you need to use devices with similar
		 * topology on all peers. */
		blk_queue_discard_granularity(q, 512);
		q->limits.max_discard_sectors = drbd_max_discard_sectors(connection);
		blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
		q->limits.max_write_zeroes_sectors = drbd_max_discard_sectors(connection);
		blk_queue_flag_clear(QUEUE_FLAG_DISCARD, q);
		blk_queue_discard_granularity(q, 0);
		q->limits.max_discard_sectors = 0;
		q->limits.max_write_zeroes_sectors = 0;
static void fixup_discard_if_not_supported(struct request_queue *q)
	/* To avoid confusion, if this queue does not support discard, clear
	 * max_discard_sectors, which is what lsblk -D reports to the user.
	 * Older kernels got this wrong in "stack limits".
	 */
	if (!blk_queue_discard(q)) {
		blk_queue_max_discard_sectors(q, 0);
		blk_queue_discard_granularity(q, 0);

static void fixup_write_zeroes(struct drbd_device *device, struct request_queue *q)
	/* Fixup max_write_zeroes_sectors after blk_stack_limits():
	 * if we can handle "zeroes" efficiently on the protocol,
	 * we want to do that, even if our backend does not announce
	 * max_write_zeroes_sectors itself. */
	struct drbd_connection *connection = first_peer_device(device)->connection;
	/* If the peer announces WZEROES support, use it.  Otherwise, rather
	 * send explicit zeroes than rely on some discard-zeroes-data magic. */
	if (connection->agreed_features & DRBD_FF_WZEROES)
		q->limits.max_write_zeroes_sectors = DRBD_MAX_BBIO_SECTORS;
		q->limits.max_write_zeroes_sectors = 0;
static void decide_on_write_same_support(struct drbd_device *device,
			struct request_queue *q,
			struct request_queue *b, struct o_qlim *o,
			bool disable_write_same)
	struct drbd_peer_device *peer_device = first_peer_device(device);
	struct drbd_connection *connection = peer_device->connection;
	bool can_do = b ? b->limits.max_write_same_sectors : true;

	if (can_do && disable_write_same) {
		drbd_info(peer_device, "WRITE_SAME disabled by config\n");

	if (can_do && connection->cstate >= C_CONNECTED && !(connection->agreed_features & DRBD_FF_WSAME)) {
		drbd_info(peer_device, "peer does not support WRITE_SAME\n");

		/* logical block size; queue_logical_block_size(NULL) is 512 */
		unsigned int peer_lbs = be32_to_cpu(o->logical_block_size);
		unsigned int me_lbs_b = queue_logical_block_size(b);
		unsigned int me_lbs = queue_logical_block_size(q);

		if (me_lbs_b != me_lbs) {
				"logical block size of local backend does not match (drbd:%u, backend:%u); was this a late attach?\n",
			/* rather disable write same than trigger some BUG_ON later in the scsi layer. */
		if (me_lbs_b != peer_lbs) {
			drbd_warn(peer_device, "logical block sizes do not match (me:%u, peer:%u); this may cause problems.\n",
				drbd_dbg(peer_device, "logical block size mismatch: WRITE_SAME disabled.\n");
			me_lbs = max(me_lbs, me_lbs_b);
			/* We cannot change the logical block size of an in-use queue.
			 * We can only hope that access happens to be properly aligned.
			 * If not, the peer will likely produce an IO error, and detach. */
			if (peer_lbs > me_lbs) {
				if (device->state.role != R_PRIMARY) {
					blk_queue_logical_block_size(q, peer_lbs);
					drbd_warn(peer_device, "logical block size set to %u\n", peer_lbs);
					drbd_warn(peer_device,
						"current Primary must NOT adjust logical block size (%u -> %u); hope for the best.\n",
		if (can_do && !o->write_same_capable) {
			/* If we introduce an open-coded write-same loop on the receiving side,
			 * the peer would present itself as "capable". */
			drbd_dbg(peer_device, "WRITE_SAME disabled (peer device not capable)\n");

	blk_queue_max_write_same_sectors(q, can_do ? DRBD_MAX_BBIO_SECTORS : 0);
static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
				   unsigned int max_bio_size, struct o_qlim *o)
	struct request_queue * const q = device->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;
	struct request_queue *b = NULL;
	struct disk_conf *dc;
	bool discard_zeroes_if_aligned = true;
	bool disable_write_same = false;

		b = bdev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		dc = rcu_dereference(device->ldev->disk_conf);
		max_segments = dc->max_bio_bvecs;
		discard_zeroes_if_aligned = dc->discard_zeroes_if_aligned;
		disable_write_same = dc->disable_write_same;

		blk_set_stacking_limits(&q->limits);

	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);
	decide_on_discard_support(device, q, b, discard_zeroes_if_aligned);
	decide_on_write_same_support(device, q, b, o, disable_write_same);

		blk_stack_limits(&q->limits, &b->limits, 0);
		blk_queue_update_readahead(q);
	fixup_discard_if_not_supported(q);
	fixup_write_zeroes(device, q);
void drbd_reconsider_queue_parameters(struct drbd_device *device, struct drbd_backing_dev *bdev, struct o_qlim *o)
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(device->rq_queue) << 9;
	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */

		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
		device->local_max_bio_size = local;
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough.
	   Because, from 8.3.8 onwards, the peer can use multiple
	   BIOs for a single peer_request */
	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		if (first_peer_device(device)->connection->agreed_pro_version < 94)
			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (first_peer_device(device)->connection->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (first_peer_device(device)->connection->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
			peer = DRBD_MAX_BIO_SIZE;

		/* We may later detach and re-attach on a disconnected Primary.
		 * Avoid having this setting jump back in that case.
		 * We want to store what we know the peer DRBD can handle,
		 * not what the peer IO backend can handle. */
		if (peer > device->peer_max_bio_size)
			device->peer_max_bio_size = peer;

	new = min(local, peer);

	if (device->state.role == R_PRIMARY && new < now)
		drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

		drbd_info(device, "max BIO size = %u\n", new);

	drbd_setup_queue_param(device, bdev, new, o);
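/* In short: the effective max BIO size becomes min(local, peer), where the
 * peer limit follows the agreed protocol version -- anything up to and
 * including 94 is capped at DRBD_MAX_SIZE_H80_PACKET (32 KiB), 95..99 at
 * DRBD_MAX_BIO_SIZE_P95, and 100 or newer at the full DRBD_MAX_BIO_SIZE. */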
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);

		/* ack_receiver thread and ack_sender workqueue are implicitly
		 * stopped by receiver in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
/* Make sure IO is suspended before calling this function. */
static void drbd_suspend_al(struct drbd_device *device)
	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

		drbd_info(device, "Suspended AL updates\n");
static bool should_set_defaults(struct genl_info *info)
	unsigned flags = ((struct drbd_genlmsghdr *)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 context slot numbers per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
		/AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
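/* Worked example: with the historic fixed 32 kB activity log, al_size_4k is 8
 * on-disk transaction blocks, so at most (8 - 1) * 919 = 6433 al-extents can
 * be configured; only once al_size_4k exceeds the ~72 blocks checked above
 * does the 16-bit DRBD_AL_EXTENTS_MAX cap take over. */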
static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
	return	a->disk_barrier != b->disk_barrier ||
		a->disk_flushes != b->disk_flushes ||
		a->disk_drain != b->disk_drain;

static void sanitize_disk_conf(struct drbd_device *device, struct disk_conf *disk_conf,
			       struct drbd_backing_dev *nbc)
	struct request_queue * const q = nbc->backing_bdev->bd_disk->queue;

	if (disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (disk_conf->al_extents > drbd_al_extents_max(nbc))
		disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (!blk_queue_discard(q)) {
		if (disk_conf->rs_discard_granularity) {
			disk_conf->rs_discard_granularity = 0; /* disable feature */
			drbd_info(device, "rs_discard_granularity feature disabled\n");

	if (disk_conf->rs_discard_granularity) {
		int orig_value = disk_conf->rs_discard_granularity;

		if (q->limits.discard_granularity > disk_conf->rs_discard_granularity)
			disk_conf->rs_discard_granularity = q->limits.discard_granularity;

		remainder = disk_conf->rs_discard_granularity % q->limits.discard_granularity;
		disk_conf->rs_discard_granularity += remainder;

		if (disk_conf->rs_discard_granularity > q->limits.max_discard_sectors << 9)
			disk_conf->rs_discard_granularity = q->limits.max_discard_sectors << 9;

		if (disk_conf->rs_discard_granularity != orig_value)
			drbd_info(device, "rs_discard_granularity changed to %d\n",
				  disk_conf->rs_discard_granularity);
static int disk_opts_check_al_size(struct drbd_device *device, struct disk_conf *dc)
	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)

	drbd_suspend_io(device);
	/* If IO completion is currently blocked, we would likely wait
	 * "forever" for the activity log to become unused.  So we don't. */
	if (atomic_read(&device->ap_bio_cnt))

	wait_event(device->al_wait, lc_try_lock(device->act_log));
	drbd_al_shrink(device);
	err = drbd_check_al_size(device, dc);
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_resume_io(device);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct drbd_device *device;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	unsigned int fifo_size;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	sanitize_disk_conf(device, new_disk_conf, device->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != device->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
			drbd_err(device, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;

	err = disk_opts_check_al_size(device, new_disk_conf);
		/* Could be just "busy". Ignore?
		 * Introduce dedicated error code? */
		drbd_msg_put_info(adm_ctx.reply_skb,
			"Try again without changing current al-extents setting");
		retcode = ERR_NOMEM;

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(device);
	unlock_all_resources();

	if (retcode != NO_ERROR)

		old_plan = device->rs_plan_s;
		rcu_assign_pointer(device->rs_plan_s, new_plan);

	mutex_unlock(&device->resource->conf_update);

	if (new_disk_conf->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
		device->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
		set_bit(MD_NO_FUA, &device->flags);

	if (write_ordering_changed(old_disk_conf, new_disk_conf))
		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);

	if (old_disk_conf->discard_zeroes_if_aligned != new_disk_conf->discard_zeroes_if_aligned
	||  old_disk_conf->disable_write_same != new_disk_conf->disable_write_same)
		drbd_reconsider_queue_parameters(device, device->ldev, NULL);

	drbd_md_sync(device);

	if (device->state.conn >= C_CONNECTED) {
		struct drbd_peer_device *peer_device;

		for_each_peer_device(peer_device, device)
			drbd_send_sync_param(peer_device);

	kfree(old_disk_conf);
	mod_timer(&device->request_timer, jiffies + HZ);

	mutex_unlock(&device->resource->conf_update);
	kfree(new_disk_conf);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
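/* Note the pointer-swap pattern used above for the live configuration: the new
 * disk_conf is published via rcu_assign_pointer() while conf_update is held,
 * RCU readers keep using the old copy without blocking, and the old struct is
 * only kfree()d once no reader can still reference it. */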
static struct block_device *open_backing_dev(struct drbd_device *device,
		const char *bdev_path, void *claim_ptr, bool do_bd_link)
	struct block_device *bdev;

	bdev = blkdev_get_by_path(bdev_path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
		drbd_err(device, "open(\"%s\") failed with %ld\n",
				bdev_path, PTR_ERR(bdev));

	err = bd_link_disk_holder(bdev, device->vdisk);
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
		bdev = ERR_PTR(err);
static int open_backing_devices(struct drbd_device *device,
		struct disk_conf *new_disk_conf,
		struct drbd_backing_dev *nbc)
	struct block_device *bdev;

	bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
		return ERR_OPEN_DISK;
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = open_backing_dev(device, new_disk_conf->meta_dev,
		/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
		 * if potentially shared with other drbd minors */
			(new_disk_conf->meta_dev_idx < 0) ? (void *)device : (void *)drbd_m_holder,
		/* avoid double bd_claim_by_disk() for the same (source,target) tuple,
		 * as would happen with internal metadata. */
			(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
			 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
		return ERR_OPEN_MD_DISK;
	nbc->md_bdev = bdev;
static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
		bd_unlink_disk_holder(bdev, device->vdisk);
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);

void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
	close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
	close_backing_dev(device, ldev->backing_bdev, true);

	kfree(ldev->disk_conf);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	struct drbd_peer_device *peer_device;
	struct drbd_connection *connection;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct disk_conf *new_disk_conf = NULL;
	struct lru_cache *resync_lru = NULL;
	struct fifo_buffer *new_plan = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);
	peer_device = first_peer_device(device);
	connection = peer_device->connection;
	conn_reconfig_start(connection);

	/* if you want to reconfigure, please tear down first */
	if (device->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));

	/* make sure there is no leftover from previous force-detach attempts */
	clear_bit(FORCE_DETACH, &device->flags);
	clear_bit(WAS_IO_ERROR, &device->flags);
	clear_bit(WAS_READ_ERROR, &device->flags);

	/* and no leftover from previously aborted resync or verify, either */
	device->rs_total = 0;
	device->rs_failed = 0;
	atomic_set(&device->rs_pending_cnt, 0);

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
		retcode = ERR_NOMEM;
	spin_lock_init(&nbc->md.uuid_lock);

	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
	nbc->disk_conf = new_disk_conf;

	set_disk_conf_defaults(new_disk_conf);
	err = disk_conf_from_attrs(new_disk_conf, info);
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
		retcode = ERR_NOMEM;

	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;

	nc = rcu_dereference(connection->net_conf);
		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			retcode = ERR_STONITH_AND_PROT_A;

	retcode = open_backing_devices(device, new_disk_conf, nbc);
	if (retcode != NO_ERROR)

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
		retcode = ERR_NOMEM;

	/* Read our meta data super block early.
	 * This also sets other on-disk offsets. */
	retcode = drbd_md_read(device, nbc);
	if (retcode != NO_ERROR)

	sanitize_disk_conf(device, new_disk_conf, nbc);

	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
		drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) new_disk_conf->disk_size);
		retcode = ERR_DISK_TOO_SMALL;

	if (new_disk_conf->meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TOO_SMALL;
		drbd_warn(device, "refusing attach: md-device too small, "
		     "at least %llu sectors needed for this meta-disk type\n",
		     (unsigned long long) min_md_device_sectors);

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) < get_capacity(device->vdisk)) {
		retcode = ERR_DISK_TOO_SMALL;

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		drbd_warn(device, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (new_disk_conf->meta_dev_idx >= 0)
			drbd_warn(device, "==>> using internal or flexible "
				      "meta data may help <<==\n");

	drbd_suspend_io(device);
	/* also wait for the last barrier ack. */
	/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
1955 * We need a way to either ignore barrier acks for barriers sent before a device
1956 * was attached, or a way to wait for all pending barrier acks to come in.
1957 * As barriers are counted per resource,
1958 * we'd need to suspend io on all devices of a resource.
1960 wait_event(device
->misc_wait
, !atomic_read(&device
->ap_pending_cnt
) || drbd_suspended(device
));
1961 /* and for any other previously queued work */
1962 drbd_flush_workqueue(&connection
->sender_work
);
1964 rv
= _drbd_request_state(device
, NS(disk
, D_ATTACHING
), CS_VERBOSE
);
1965 retcode
= rv
; /* FIXME: Type mismatch. */
1966 drbd_resume_io(device
);
1967 if (rv
< SS_SUCCESS
)
1970 if (!get_ldev_if_state(device
, D_ATTACHING
))
1971 goto force_diskless
;
1973 if (!device
->bitmap
) {
1974 if (drbd_bm_init(device
)) {
1975 retcode
= ERR_NOMEM
;
1976 goto force_diskless_dec
;
1980 if (device
->state
.pdsk
!= D_UP_TO_DATE
&& device
->ed_uuid
&&
1981 (device
->state
.role
== R_PRIMARY
|| device
->state
.peer
== R_PRIMARY
) &&
1982 (device
->ed_uuid
& ~((u64
)1)) != (nbc
->md
.uuid
[UI_CURRENT
] & ~((u64
)1))) {
1983 drbd_err(device
, "Can only attach to data with current UUID=%016llX\n",
1984 (unsigned long long)device
->ed_uuid
);
1985 retcode
= ERR_DATA_NOT_CURRENT
;
1986 goto force_diskless_dec
;
1989 /* Since we are diskless, fix the activity log first... */
1990 if (drbd_check_al_size(device
, new_disk_conf
)) {
1991 retcode
= ERR_NOMEM
;
1992 goto force_diskless_dec
;
1995 /* Prevent shrinking of consistent devices ! */
1997 unsigned long long nsz
= drbd_new_dev_size(device
, nbc
, nbc
->disk_conf
->disk_size
, 0);
1998 unsigned long long eff
= nbc
->md
.la_size_sect
;
1999 if (drbd_md_test_flag(nbc
, MDF_CONSISTENT
) && nsz
< eff
) {
2000 if (nsz
== nbc
->disk_conf
->disk_size
) {
2001 drbd_warn(device
, "truncating a consistent device during attach (%llu < %llu)\n", nsz
, eff
);
2003 drbd_warn(device
, "refusing to truncate a consistent device (%llu < %llu)\n", nsz
, eff
);
2004 drbd_msg_sprintf_info(adm_ctx
.reply_skb
,
2005 "To-be-attached device has last effective > current size, and is consistent\n"
2006 "(%llu > %llu sectors). Refusing to attach.", eff
, nsz
);
2007 retcode
= ERR_IMPLICIT_SHRINK
;
2008 goto force_diskless_dec
;
2013 lock_all_resources();
2014 retcode
= drbd_resync_after_valid(device
, new_disk_conf
->resync_after
);
2015 if (retcode
!= NO_ERROR
) {
2016 unlock_all_resources();
2017 goto force_diskless_dec
;
2020 /* Reset the "barriers don't work" bits here, then force meta data to
2021 * be written, to ensure we determine if barriers are supported. */
2022 if (new_disk_conf
->md_flushes
)
2023 clear_bit(MD_NO_FUA
, &device
->flags
);
2025 set_bit(MD_NO_FUA
, &device
->flags
);
2027 /* Point of no return reached.
2028 * Devices and memory are no longer released by error cleanup below.
2029 * now device takes over responsibility, and the state engine should
2030 * clean it up somewhere. */
2031 D_ASSERT(device
, device
->ldev
== NULL
);
2033 device
->resync
= resync_lru
;
2034 device
->rs_plan_s
= new_plan
;
2037 new_disk_conf
= NULL
;
2040 drbd_resync_after_changed(device
);
2041 drbd_bump_write_ordering(device
->resource
, device
->ldev
, WO_BDEV_FLUSH
);
2042 unlock_all_resources();
2044 if (drbd_md_test_flag(device
->ldev
, MDF_CRASHED_PRIMARY
))
2045 set_bit(CRASHED_PRIMARY
, &device
->flags
);
2047 clear_bit(CRASHED_PRIMARY
, &device
->flags
);
2049 if (drbd_md_test_flag(device
->ldev
, MDF_PRIMARY_IND
) &&
2050 !(device
->state
.role
== R_PRIMARY
&& device
->resource
->susp_nod
))
2051 set_bit(CRASHED_PRIMARY
, &device
->flags
);
2053 device
->send_cnt
= 0;
2054 device
->recv_cnt
= 0;
2055 device
->read_cnt
= 0;
2056 device
->writ_cnt
= 0;
2058 drbd_reconsider_queue_parameters(device
, device
->ldev
, NULL
);
2060 /* If I am currently not R_PRIMARY,
2061 * but meta data primary indicator is set,
2062 * I just now recover from a hard crash,
2063 * and have been R_PRIMARY before that crash.
2065 * Now, if I had no connection before that crash
2066 * (have been degraded R_PRIMARY), chances are that
2067 * I won't find my peer now either.
2069 * In that case, and _only_ in that case,
2070 * we use the degr-wfc-timeout instead of the default,
2071 * so we can automatically recover from a crash of a
2072 * degraded but active "cluster" after a certain timeout.
2074 clear_bit(USE_DEGR_WFC_T
, &device
->flags
);
2075 if (device
->state
.role
!= R_PRIMARY
&&
2076 drbd_md_test_flag(device
->ldev
, MDF_PRIMARY_IND
) &&
2077 !drbd_md_test_flag(device
->ldev
, MDF_CONNECTED_IND
))
2078 set_bit(USE_DEGR_WFC_T
, &device
->flags
);
2080 dd
= drbd_determine_dev_size(device
, 0, NULL
);
2081 if (dd
<= DS_ERROR
) {
2082 retcode
= ERR_NOMEM_BITMAP
;
2083 goto force_diskless_dec
;
2084 } else if (dd
== DS_GREW
)
2085 set_bit(RESYNC_AFTER_NEG
, &device
->flags
);
2087 if (drbd_md_test_flag(device
->ldev
, MDF_FULL_SYNC
) ||
2088 (test_bit(CRASHED_PRIMARY
, &device
->flags
) &&
2089 drbd_md_test_flag(device
->ldev
, MDF_AL_DISABLED
))) {
2090 drbd_info(device
, "Assuming that all blocks are out of sync "
2091 "(aka FullSync)\n");
2092 if (drbd_bitmap_io(device
, &drbd_bmio_set_n_write
,
2093 "set_n_write from attaching", BM_LOCKED_MASK
)) {
2094 retcode
= ERR_IO_MD_DISK
;
2095 goto force_diskless_dec
;
2098 if (drbd_bitmap_io(device
, &drbd_bm_read
,
2099 "read from attaching", BM_LOCKED_MASK
)) {
2100 retcode
= ERR_IO_MD_DISK
;
2101 goto force_diskless_dec
;
2105 if (_drbd_bm_total_weight(device
) == drbd_bm_bits(device
))
2106 drbd_suspend_al(device
); /* IO is still suspended here... */
2108 spin_lock_irq(&device
->resource
->req_lock
);
2109 os
= drbd_read_state(device
);
2111 /* If MDF_CONSISTENT is not set go into inconsistent state,
2112 otherwise investigate MDF_WasUpToDate...
2113 If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
2114 otherwise into D_CONSISTENT state.
2116 if (drbd_md_test_flag(device
->ldev
, MDF_CONSISTENT
)) {
2117 if (drbd_md_test_flag(device
->ldev
, MDF_WAS_UP_TO_DATE
))
2118 ns
.disk
= D_CONSISTENT
;
2120 ns
.disk
= D_OUTDATED
;
2122 ns
.disk
= D_INCONSISTENT
;
2125 if (drbd_md_test_flag(device
->ldev
, MDF_PEER_OUT_DATED
))
2126 ns
.pdsk
= D_OUTDATED
;
2129 if (ns
.disk
== D_CONSISTENT
&&
2130 (ns
.pdsk
== D_OUTDATED
|| rcu_dereference(device
->ldev
->disk_conf
)->fencing
== FP_DONT_CARE
))
2131 ns
.disk
= D_UP_TO_DATE
;
2133 /* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
2134 MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
2135 this point, because drbd_request_state() modifies these
2138 if (rcu_dereference(device
->ldev
->disk_conf
)->al_updates
)
2139 device
->ldev
->md
.flags
&= ~MDF_AL_DISABLED
;
2141 device
->ldev
->md
.flags
|= MDF_AL_DISABLED
;
2145 /* In case we are C_CONNECTED postpone any decision on the new disk
2146 state after the negotiation phase. */
2147 if (device
->state
.conn
== C_CONNECTED
) {
2148 device
->new_state_tmp
.i
= ns
.i
;
2150 ns
.disk
= D_NEGOTIATING
;
2152 /* We expect to receive up-to-date UUIDs soon.
2153 To avoid a race in receive_state, free p_uuid while
2154 holding req_lock. I.e. atomic with the state change */
2155 kfree(device
->p_uuid
);
2156 device
->p_uuid
= NULL
;
2159 rv
= _drbd_set_state(device
, ns
, CS_VERBOSE
, NULL
);
2160 spin_unlock_irq(&device
->resource
->req_lock
);
2162 if (rv
< SS_SUCCESS
)
2163 goto force_diskless_dec
;
2165 mod_timer(&device
->request_timer
, jiffies
+ HZ
);
2167 if (device
->state
.role
== R_PRIMARY
)
2168 device
->ldev
->md
.uuid
[UI_CURRENT
] |= (u64
)1;
2170 device
->ldev
->md
.uuid
[UI_CURRENT
] &= ~(u64
)1;
2172 drbd_md_mark_dirty(device
);
2173 drbd_md_sync(device
);
2175 kobject_uevent(&disk_to_dev(device
->vdisk
)->kobj
, KOBJ_CHANGE
);
2177 conn_reconfig_done(connection
);
2178 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2179 drbd_adm_finish(&adm_ctx
, info
, retcode
);
2185 drbd_force_state(device
, NS(disk
, D_DISKLESS
));
2186 drbd_md_sync(device
);
2188 conn_reconfig_done(connection
);
2190 close_backing_dev(device
, nbc
->md_bdev
, nbc
->md_bdev
!= nbc
->backing_bdev
);
2191 close_backing_dev(device
, nbc
->backing_bdev
, true);
2194 kfree(new_disk_conf
);
2195 lc_destroy(resync_lru
);
2197 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2199 drbd_adm_finish(&adm_ctx
, info
, retcode
);
static int adm_detach(struct drbd_device *device, int force)
{
	if (force) {
		set_bit(FORCE_DETACH, &device->flags);
		drbd_force_state(device, NS(disk, D_FAILED));
		return SS_SUCCESS;
	}

	return drbd_request_detach_interruptible(device);
}

/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct detach_parms parms = { };
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
		err = detach_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = adm_detach(adm_ctx.device, parms.force_detach);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
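/* Userspace typically reaches this handler through the drbd generic netlink
 * family (e.g. via "drbdsetup detach"); a forced detach (parms.force_detach)
 * skips the graceful, interruptible path above. */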
static bool conn_resync_running(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.conn == C_SYNC_SOURCE ||
		    device->state.conn == C_SYNC_TARGET ||
		    device->state.conn == C_PAUSED_SYNC_S ||
		    device->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}

static bool conn_ov_running(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.conn == C_VERIFY_S ||
		    device->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
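/* These two helpers are used by drbd_adm_net_opts() below: changing the
 * csums-alg while a resync is running, or the verify-alg while an online
 * verify is running, is refused there (ERR_CSUMS_RESYNC_RUNNING and
 * ERR_VERIFY_RUNNING, respectively). */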
static enum drbd_ret_code
_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
{
	struct drbd_peer_device *peer_device;
	int i;

	if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
		if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
			return ERR_NEED_APV_100;

		if (new_net_conf->two_primaries != old_net_conf->two_primaries)
			return ERR_NEED_APV_100;

		if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
			return ERR_NEED_APV_100;
	}

	if (!new_net_conf->two_primaries &&
	    conn_highest_role(connection) == R_PRIMARY &&
	    conn_highest_peer(connection) == R_PRIMARY)
		return ERR_NEED_ALLOW_TWO_PRI;

	if (new_net_conf->two_primaries &&
	    (new_net_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev(device)) {
			enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
			put_ldev(device);
			if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
			return ERR_DISCARD_IMPOSSIBLE;
	}

	if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}
static enum drbd_ret_code
check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
{
	enum drbd_ret_code rv;
	struct drbd_peer_device *peer_device;
	int i;

	rcu_read_lock();
	rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
	rcu_read_unlock();

	/* connection->peer_devices protected by genl_lock() here */
	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		if (!device->bitmap) {
			if (drbd_bm_init(device))
				return ERR_NOMEM;
		}
	}

	return rv;
}
struct crypto {
	struct crypto_shash *verify_tfm;
	struct crypto_shash *csums_tfm;
	struct crypto_shash *cram_hmac_tfm;
	struct crypto_shash *integrity_tfm;
};

static int
alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_shash(tfm_name, 0, 0);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}

static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;

	rv = alloc_shash(&crypto->csums_tfm, new_net_conf->csums_alg,
			 ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_shash(&crypto->verify_tfm, new_net_conf->verify_alg,
			 ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_shash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
			 ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_net_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_net_conf->cram_hmac_alg);

		rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
				 ERR_AUTH_ALG);
	}

	return rv;
}

static void free_crypto(struct crypto *crypto)
{
	crypto_free_shash(crypto->cram_hmac_tfm);
	crypto_free_shash(crypto->integrity_tfm);
	crypto_free_shash(crypto->csums_tfm);
	crypto_free_shash(crypto->verify_tfm);
}
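/* free_crypto() is also safe on a merely zero-initialized struct crypto,
 * as used in the error paths of drbd_adm_net_opts() and drbd_adm_connect():
 * crypto_free_shash() tolerates a NULL tfm pointer. */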
2417 int drbd_adm_net_opts(struct sk_buff
*skb
, struct genl_info
*info
)
2419 struct drbd_config_context adm_ctx
;
2420 enum drbd_ret_code retcode
;
2421 struct drbd_connection
*connection
;
2422 struct net_conf
*old_net_conf
, *new_net_conf
= NULL
;
2424 int ovr
; /* online verify running */
2425 int rsr
; /* re-sync running */
2426 struct crypto crypto
= { };
2428 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_CONNECTION
);
2429 if (!adm_ctx
.reply_skb
)
2431 if (retcode
!= NO_ERROR
)
2434 connection
= adm_ctx
.connection
;
2435 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
2437 new_net_conf
= kzalloc(sizeof(struct net_conf
), GFP_KERNEL
);
2438 if (!new_net_conf
) {
2439 retcode
= ERR_NOMEM
;
2443 conn_reconfig_start(connection
);
2445 mutex_lock(&connection
->data
.mutex
);
2446 mutex_lock(&connection
->resource
->conf_update
);
2447 old_net_conf
= connection
->net_conf
;
2449 if (!old_net_conf
) {
2450 drbd_msg_put_info(adm_ctx
.reply_skb
, "net conf missing, try connect");
2451 retcode
= ERR_INVALID_REQUEST
;
2455 *new_net_conf
= *old_net_conf
;
2456 if (should_set_defaults(info
))
2457 set_net_conf_defaults(new_net_conf
);
2459 err
= net_conf_from_attrs_for_change(new_net_conf
, info
);
2460 if (err
&& err
!= -ENOMSG
) {
2461 retcode
= ERR_MANDATORY_TAG
;
2462 drbd_msg_put_info(adm_ctx
.reply_skb
, from_attrs_err_to_txt(err
));
2466 retcode
= check_net_options(connection
, new_net_conf
);
2467 if (retcode
!= NO_ERROR
)
2470 /* re-sync running */
2471 rsr
= conn_resync_running(connection
);
2472 if (rsr
&& strcmp(new_net_conf
->csums_alg
, old_net_conf
->csums_alg
)) {
2473 retcode
= ERR_CSUMS_RESYNC_RUNNING
;
2477 /* online verify running */
2478 ovr
= conn_ov_running(connection
);
2479 if (ovr
&& strcmp(new_net_conf
->verify_alg
, old_net_conf
->verify_alg
)) {
2480 retcode
= ERR_VERIFY_RUNNING
;
2484 retcode
= alloc_crypto(&crypto
, new_net_conf
);
2485 if (retcode
!= NO_ERROR
)
2488 rcu_assign_pointer(connection
->net_conf
, new_net_conf
);
2491 crypto_free_shash(connection
->csums_tfm
);
2492 connection
->csums_tfm
= crypto
.csums_tfm
;
2493 crypto
.csums_tfm
= NULL
;
2496 crypto_free_shash(connection
->verify_tfm
);
2497 connection
->verify_tfm
= crypto
.verify_tfm
;
2498 crypto
.verify_tfm
= NULL
;
2501 crypto_free_shash(connection
->integrity_tfm
);
2502 connection
->integrity_tfm
= crypto
.integrity_tfm
;
2503 if (connection
->cstate
>= C_WF_REPORT_PARAMS
&& connection
->agreed_pro_version
>= 100)
2504 /* Do this without trying to take connection->data.mutex again. */
2505 __drbd_send_protocol(connection
, P_PROTOCOL_UPDATE
);
2507 crypto_free_shash(connection
->cram_hmac_tfm
);
2508 connection
->cram_hmac_tfm
= crypto
.cram_hmac_tfm
;
2510 mutex_unlock(&connection
->resource
->conf_update
);
2511 mutex_unlock(&connection
->data
.mutex
);
2513 kfree(old_net_conf
);
2515 if (connection
->cstate
>= C_WF_REPORT_PARAMS
) {
2516 struct drbd_peer_device
*peer_device
;
2519 idr_for_each_entry(&connection
->peer_devices
, peer_device
, vnr
)
2520 drbd_send_sync_param(peer_device
);
2526 mutex_unlock(&connection
->resource
->conf_update
);
2527 mutex_unlock(&connection
->data
.mutex
);
2528 free_crypto(&crypto
);
2529 kfree(new_net_conf
);
2531 conn_reconfig_done(connection
);
2533 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2535 drbd_adm_finish(&adm_ctx
, info
, retcode
);
static void connection_to_info(struct connection_info *info,
			       struct drbd_connection *connection)
{
	info->conn_connection_state = connection->cstate;
	info->conn_role = conn_highest_peer(connection);
}

static void peer_device_to_info(struct peer_device_info *info,
				struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;

	info->peer_repl_state =
		max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
	info->peer_disk_state = device->state.pdsk;
	info->peer_resync_susp_user = device->state.user_isp;
	info->peer_resync_susp_peer = device->state.peer_isp;
	info->peer_resync_susp_dependency = device->state.aftr_isp;
}
2559 int drbd_adm_connect(struct sk_buff
*skb
, struct genl_info
*info
)
2561 struct connection_info connection_info
;
2562 enum drbd_notification_type flags
;
2563 unsigned int peer_devices
= 0;
2564 struct drbd_config_context adm_ctx
;
2565 struct drbd_peer_device
*peer_device
;
2566 struct net_conf
*old_net_conf
, *new_net_conf
= NULL
;
2567 struct crypto crypto
= { };
2568 struct drbd_resource
*resource
;
2569 struct drbd_connection
*connection
;
2570 enum drbd_ret_code retcode
;
2574 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_RESOURCE
);
2576 if (!adm_ctx
.reply_skb
)
2578 if (retcode
!= NO_ERROR
)
2580 if (!(adm_ctx
.my_addr
&& adm_ctx
.peer_addr
)) {
2581 drbd_msg_put_info(adm_ctx
.reply_skb
, "connection endpoint(s) missing");
2582 retcode
= ERR_INVALID_REQUEST
;
2586 /* No need for _rcu here. All reconfiguration is
2587 * strictly serialized on genl_lock(). We are protected against
2588 * concurrent reconfiguration/addition/deletion */
2589 for_each_resource(resource
, &drbd_resources
) {
2590 for_each_connection(connection
, resource
) {
2591 if (nla_len(adm_ctx
.my_addr
) == connection
->my_addr_len
&&
2592 !memcmp(nla_data(adm_ctx
.my_addr
), &connection
->my_addr
,
2593 connection
->my_addr_len
)) {
2594 retcode
= ERR_LOCAL_ADDR
;
2598 if (nla_len(adm_ctx
.peer_addr
) == connection
->peer_addr_len
&&
2599 !memcmp(nla_data(adm_ctx
.peer_addr
), &connection
->peer_addr
,
2600 connection
->peer_addr_len
)) {
2601 retcode
= ERR_PEER_ADDR
;
2607 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
2608 connection
= first_connection(adm_ctx
.resource
);
2609 conn_reconfig_start(connection
);
2611 if (connection
->cstate
> C_STANDALONE
) {
2612 retcode
= ERR_NET_CONFIGURED
;
2616 /* allocation not in the IO path, drbdsetup / netlink process context */
2617 new_net_conf
= kzalloc(sizeof(*new_net_conf
), GFP_KERNEL
);
2618 if (!new_net_conf
) {
2619 retcode
= ERR_NOMEM
;
2623 set_net_conf_defaults(new_net_conf
);
2625 err
= net_conf_from_attrs(new_net_conf
, info
);
2626 if (err
&& err
!= -ENOMSG
) {
2627 retcode
= ERR_MANDATORY_TAG
;
2628 drbd_msg_put_info(adm_ctx
.reply_skb
, from_attrs_err_to_txt(err
));
2632 retcode
= check_net_options(connection
, new_net_conf
);
2633 if (retcode
!= NO_ERROR
)
2636 retcode
= alloc_crypto(&crypto
, new_net_conf
);
2637 if (retcode
!= NO_ERROR
)
2640 ((char *)new_net_conf
->shared_secret
)[SHARED_SECRET_MAX
-1] = 0;
2642 drbd_flush_workqueue(&connection
->sender_work
);
2644 mutex_lock(&adm_ctx
.resource
->conf_update
);
2645 old_net_conf
= connection
->net_conf
;
2647 retcode
= ERR_NET_CONFIGURED
;
2648 mutex_unlock(&adm_ctx
.resource
->conf_update
);
2651 rcu_assign_pointer(connection
->net_conf
, new_net_conf
);
2653 conn_free_crypto(connection
);
2654 connection
->cram_hmac_tfm
= crypto
.cram_hmac_tfm
;
2655 connection
->integrity_tfm
= crypto
.integrity_tfm
;
2656 connection
->csums_tfm
= crypto
.csums_tfm
;
2657 connection
->verify_tfm
= crypto
.verify_tfm
;
2659 connection
->my_addr_len
= nla_len(adm_ctx
.my_addr
);
2660 memcpy(&connection
->my_addr
, nla_data(adm_ctx
.my_addr
), connection
->my_addr_len
);
2661 connection
->peer_addr_len
= nla_len(adm_ctx
.peer_addr
);
2662 memcpy(&connection
->peer_addr
, nla_data(adm_ctx
.peer_addr
), connection
->peer_addr_len
);
2664 idr_for_each_entry(&connection
->peer_devices
, peer_device
, i
) {
2668 connection_to_info(&connection_info
, connection
);
2669 flags
= (peer_devices
--) ? NOTIFY_CONTINUES
: 0;
2670 mutex_lock(¬ification_mutex
);
2671 notify_connection_state(NULL
, 0, connection
, &connection_info
, NOTIFY_CREATE
| flags
);
2672 idr_for_each_entry(&connection
->peer_devices
, peer_device
, i
) {
2673 struct peer_device_info peer_device_info
;
2675 peer_device_to_info(&peer_device_info
, peer_device
);
2676 flags
= (peer_devices
--) ? NOTIFY_CONTINUES
: 0;
2677 notify_peer_device_state(NULL
, 0, peer_device
, &peer_device_info
, NOTIFY_CREATE
| flags
);
2679 mutex_unlock(¬ification_mutex
);
2680 mutex_unlock(&adm_ctx
.resource
->conf_update
);
2683 idr_for_each_entry(&connection
->peer_devices
, peer_device
, i
) {
2684 struct drbd_device
*device
= peer_device
->device
;
2685 device
->send_cnt
= 0;
2686 device
->recv_cnt
= 0;
2690 retcode
= conn_request_state(connection
, NS(conn
, C_UNCONNECTED
), CS_VERBOSE
);
2692 conn_reconfig_done(connection
);
2693 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2694 drbd_adm_finish(&adm_ctx
, info
, retcode
);
2698 free_crypto(&crypto
);
2699 kfree(new_net_conf
);
2701 conn_reconfig_done(connection
);
2702 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2704 drbd_adm_finish(&adm_ctx
, info
, retcode
);
static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
{
	enum drbd_conns cstate;
	enum drbd_state_rv rv;
repeat:
	rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);

		if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);

		break;
	case SS_CW_FAILED_BY_PEER:
		spin_lock_irq(&connection->resource->req_lock);
		cstate = connection->cstate;
		spin_unlock_irq(&connection->resource->req_lock);
		if (cstate <= C_WF_CONNECTION)
			goto repeat;
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:
		/* no special handling necessary */
		break;
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&connection->receiver);

		/* Race breaker.  This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart.  We may have "killed" the receiver thread just
		 * after drbd_receiver() returned.  Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			drbd_err(connection,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
		/* Unlike in DRBD 9, the state engine has generated
		 * NOTIFY_DESTROY events before clearing connection->net_conf. */
	}
	return rv;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct disconnect_parms parms;
	struct drbd_connection *connection;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	connection = adm_ctx.connection;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	rv = conn_try_disconnect(connection, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
	mutex_unlock(&adm_ctx.resource->adm_mutex);
fail:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
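/* Typically invoked from userspace as "drbdsetup disconnect"; with
 * parms.force_disconnect set, conn_try_disconnect() requests the state change
 * with CS_HARD instead of negotiating it with the peer. */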
void resync_after_online_grow(struct drbd_device *device)
{
	int iass; /* I am sync source */

	drbd_info(device, "Resync of new storage after online grow\n");
	if (device->state.role != device->state.peer)
		iass = (device->state.role == R_PRIMARY);
	else
		iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);

	if (iass)
		drbd_start_resync(device, C_SYNC_SOURCE);
	else
		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
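/* Sync-source selection after an online grow: if the two nodes have different
 * roles, the current Primary becomes the sync source; if the roles are equal,
 * the RESOLVE_CONFLICTS connection flag breaks the tie.  Only the chosen
 * source calls drbd_start_resync(); the other side waits in C_WF_SYNC_UUID. */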
2828 int drbd_adm_resize(struct sk_buff
*skb
, struct genl_info
*info
)
2830 struct drbd_config_context adm_ctx
;
2831 struct disk_conf
*old_disk_conf
, *new_disk_conf
= NULL
;
2832 struct resize_parms rs
;
2833 struct drbd_device
*device
;
2834 enum drbd_ret_code retcode
;
2835 enum determine_dev_size dd
;
2836 bool change_al_layout
= false;
2837 enum dds_flags ddsf
;
2841 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
2842 if (!adm_ctx
.reply_skb
)
2844 if (retcode
!= NO_ERROR
)
2847 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
2848 device
= adm_ctx
.device
;
2849 if (!get_ldev(device
)) {
2850 retcode
= ERR_NO_DISK
;
2854 memset(&rs
, 0, sizeof(struct resize_parms
));
2855 rs
.al_stripes
= device
->ldev
->md
.al_stripes
;
2856 rs
.al_stripe_size
= device
->ldev
->md
.al_stripe_size_4k
* 4;
2857 if (info
->attrs
[DRBD_NLA_RESIZE_PARMS
]) {
2858 err
= resize_parms_from_attrs(&rs
, info
);
2860 retcode
= ERR_MANDATORY_TAG
;
2861 drbd_msg_put_info(adm_ctx
.reply_skb
, from_attrs_err_to_txt(err
));
2866 if (device
->state
.conn
> C_CONNECTED
) {
2867 retcode
= ERR_RESIZE_RESYNC
;
2871 if (device
->state
.role
== R_SECONDARY
&&
2872 device
->state
.peer
== R_SECONDARY
) {
2873 retcode
= ERR_NO_PRIMARY
;
2877 if (rs
.no_resync
&& first_peer_device(device
)->connection
->agreed_pro_version
< 93) {
2878 retcode
= ERR_NEED_APV_93
;
2883 u_size
= rcu_dereference(device
->ldev
->disk_conf
)->disk_size
;
2885 if (u_size
!= (sector_t
)rs
.resize_size
) {
2886 new_disk_conf
= kmalloc(sizeof(struct disk_conf
), GFP_KERNEL
);
2887 if (!new_disk_conf
) {
2888 retcode
= ERR_NOMEM
;
2893 if (device
->ldev
->md
.al_stripes
!= rs
.al_stripes
||
2894 device
->ldev
->md
.al_stripe_size_4k
!= rs
.al_stripe_size
/ 4) {
2895 u32 al_size_k
= rs
.al_stripes
* rs
.al_stripe_size
;
2897 if (al_size_k
> (16 * 1024 * 1024)) {
2898 retcode
= ERR_MD_LAYOUT_TOO_BIG
;
2902 if (al_size_k
< MD_32kB_SECT
/2) {
2903 retcode
= ERR_MD_LAYOUT_TOO_SMALL
;
2907 if (device
->state
.conn
!= C_CONNECTED
&& !rs
.resize_force
) {
2908 retcode
= ERR_MD_LAYOUT_CONNECTED
;
2912 change_al_layout
= true;
2915 if (device
->ldev
->known_size
!= drbd_get_capacity(device
->ldev
->backing_bdev
))
2916 device
->ldev
->known_size
= drbd_get_capacity(device
->ldev
->backing_bdev
);
2918 if (new_disk_conf
) {
2919 mutex_lock(&device
->resource
->conf_update
);
2920 old_disk_conf
= device
->ldev
->disk_conf
;
2921 *new_disk_conf
= *old_disk_conf
;
2922 new_disk_conf
->disk_size
= (sector_t
)rs
.resize_size
;
2923 rcu_assign_pointer(device
->ldev
->disk_conf
, new_disk_conf
);
2924 mutex_unlock(&device
->resource
->conf_update
);
2926 kfree(old_disk_conf
);
2927 new_disk_conf
= NULL
;
2930 ddsf
= (rs
.resize_force
? DDSF_FORCED
: 0) | (rs
.no_resync
? DDSF_NO_RESYNC
: 0);
2931 dd
= drbd_determine_dev_size(device
, ddsf
, change_al_layout
? &rs
: NULL
);
2932 drbd_md_sync(device
);
2934 if (dd
== DS_ERROR
) {
2935 retcode
= ERR_NOMEM_BITMAP
;
2937 } else if (dd
== DS_ERROR_SPACE_MD
) {
2938 retcode
= ERR_MD_LAYOUT_NO_FIT
;
2940 } else if (dd
== DS_ERROR_SHRINK
) {
2941 retcode
= ERR_IMPLICIT_SHRINK
;
2945 if (device
->state
.conn
== C_CONNECTED
) {
2947 set_bit(RESIZE_PENDING
, &device
->flags
);
2949 drbd_send_uuids(first_peer_device(device
));
2950 drbd_send_sizes(first_peer_device(device
), 1, ddsf
);
2954 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
2956 drbd_adm_finish(&adm_ctx
, info
, retcode
);
2961 kfree(new_disk_conf
);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	res_opts = adm_ctx.resource->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	err = set_resource_options(adm_ctx.resource, &res_opts);
	if (err) {
		retcode = ERR_INVALID_REQUEST;
		if (err == -ENOMEM)
			retcode = ERR_NOMEM;
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);

fail:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx.device;
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);

	/* If we happen to be C_STANDALONE R_SECONDARY, just change to
	 * D_INCONSISTENT, and set all bits in the bitmap.  Otherwise,
	 * try to start a resync handshake as sync target for full sync.
	 */
	if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
		retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
				"set_n_write from invalidate", BM_LOCKED_MASK))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
	drbd_resume_io(device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	put_ldev(device);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
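/* "drbdsetup invalidate" roughly maps to this handler: the local disk is
 * declared D_INCONSISTENT and, when a peer is reachable, a full resync with
 * the local node as sync target is initiated via C_STARTING_SYNC_T. */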
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = drbd_request_state(adm_ctx.device, mask, val);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
{
	int rv;

	rv = drbd_bmio_set_n_write(device);
	drbd_suspend_al(device);
	return rv;
}
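/* Helper for drbd_adm_invalidate_peer() below: besides setting all bitmap bits
 * it suspends activity-log updates, since everything is going to be out of
 * sync anyway. */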
3080 int drbd_adm_invalidate_peer(struct sk_buff
*skb
, struct genl_info
*info
)
3082 struct drbd_config_context adm_ctx
;
3083 int retcode
; /* drbd_ret_code, drbd_state_rv */
3084 struct drbd_device
*device
;
3086 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
3087 if (!adm_ctx
.reply_skb
)
3089 if (retcode
!= NO_ERROR
)
3092 device
= adm_ctx
.device
;
3093 if (!get_ldev(device
)) {
3094 retcode
= ERR_NO_DISK
;
3098 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
3100 /* If there is still bitmap IO pending, probably because of a previous
3101 * resync just being finished, wait for it before requesting a new resync.
3102 * Also wait for it's after_state_ch(). */
3103 drbd_suspend_io(device
);
3104 wait_event(device
->misc_wait
, !test_bit(BITMAP_IO
, &device
->flags
));
3105 drbd_flush_workqueue(&first_peer_device(device
)->connection
->sender_work
);
3107 /* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
3108 * in the bitmap. Otherwise, try to start a resync handshake
3109 * as sync source for full sync.
3111 if (device
->state
.conn
== C_STANDALONE
&& device
->state
.role
== R_PRIMARY
) {
3112 /* The peer will get a resync upon connect anyways. Just make that
3113 into a full resync. */
3114 retcode
= drbd_request_state(device
, NS(pdsk
, D_INCONSISTENT
));
3115 if (retcode
>= SS_SUCCESS
) {
3116 if (drbd_bitmap_io(device
, &drbd_bmio_set_susp_al
,
3117 "set_n_write from invalidate_peer",
3118 BM_LOCKED_SET_ALLOWED
))
3119 retcode
= ERR_IO_MD_DISK
;
3122 retcode
= drbd_request_state(device
, NS(conn
, C_STARTING_SYNC_S
));
3123 drbd_resume_io(device
);
3124 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
3127 drbd_adm_finish(&adm_ctx
, info
, retcode
);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}

int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.device->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
3184 int drbd_adm_resume_io(struct sk_buff
*skb
, struct genl_info
*info
)
3186 struct drbd_config_context adm_ctx
;
3187 struct drbd_device
*device
;
3188 int retcode
; /* enum drbd_ret_code rsp. enum drbd_state_rv */
3190 retcode
= drbd_adm_prepare(&adm_ctx
, skb
, info
, DRBD_ADM_NEED_MINOR
);
3191 if (!adm_ctx
.reply_skb
)
3193 if (retcode
!= NO_ERROR
)
3196 mutex_lock(&adm_ctx
.resource
->adm_mutex
);
3197 device
= adm_ctx
.device
;
3198 if (test_bit(NEW_CUR_UUID
, &device
->flags
)) {
3199 if (get_ldev_if_state(device
, D_ATTACHING
)) {
3200 drbd_uuid_new_current(device
);
3203 /* This is effectively a multi-stage "forced down".
3204 * The NEW_CUR_UUID bit is supposedly only set, if we
3205 * lost the replication connection, and are configured
3206 * to freeze IO and wait for some fence-peer handler.
3207 * So we still don't have a replication connection.
3208 * And now we don't have a local disk either. After
3209 * resume, we will fail all pending and new IO, because
3210 * we don't have any data anymore. Which means we will
3211 * eventually be able to terminate all users of this
3212 * device, and then take it down. By bumping the
3213 * "effective" data uuid, we make sure that you really
3214 * need to tear down before you reconfigure, we will
3215 * the refuse to re-connect or re-attach (because no
3216 * matching real data uuid exists).
3219 get_random_bytes(&val
, sizeof(u64
));
3220 drbd_set_ed_uuid(device
, val
);
3221 drbd_warn(device
, "Resumed without access to data; please tear down before attempting to re-configure.\n");
3223 clear_bit(NEW_CUR_UUID
, &device
->flags
);
3225 drbd_suspend_io(device
);
3226 retcode
= drbd_request_state(device
, NS3(susp
, 0, susp_nod
, 0, susp_fen
, 0));
3227 if (retcode
== SS_SUCCESS
) {
3228 if (device
->state
.conn
< C_CONNECTED
)
3229 tl_clear(first_peer_device(device
)->connection
);
3230 if (device
->state
.disk
== D_DISKLESS
|| device
->state
.disk
== D_FAILED
)
3231 tl_restart(first_peer_device(device
)->connection
, FAIL_FROZEN_DISK_IO
);
3233 drbd_resume_io(device
);
3234 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
3236 drbd_adm_finish(&adm_ctx
, info
, retcode
);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
3245 static int nla_put_drbd_cfg_context(struct sk_buff
*skb
,
3246 struct drbd_resource
*resource
,
3247 struct drbd_connection
*connection
,
3248 struct drbd_device
*device
)
3251 nla
= nla_nest_start_noflag(skb
, DRBD_NLA_CFG_CONTEXT
);
3253 goto nla_put_failure
;
3255 nla_put_u32(skb
, T_ctx_volume
, device
->vnr
))
3256 goto nla_put_failure
;
3257 if (nla_put_string(skb
, T_ctx_resource_name
, resource
->name
))
3258 goto nla_put_failure
;
3260 if (connection
->my_addr_len
&&
3261 nla_put(skb
, T_ctx_my_addr
, connection
->my_addr_len
, &connection
->my_addr
))
3262 goto nla_put_failure
;
3263 if (connection
->peer_addr_len
&&
3264 nla_put(skb
, T_ctx_peer_addr
, connection
->peer_addr_len
, &connection
->peer_addr
))
3265 goto nla_put_failure
;
3267 nla_nest_end(skb
, nla
);
3272 nla_nest_cancel(skb
, nla
);
/*
 * The generic netlink dump callbacks are called outside the genl_lock(), so
 * they cannot use the simple attribute parsing code which uses global
 * attributes.
 */
static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	struct nlattr *nla;

	nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
		       DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		return NULL;
	return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
}

static void resource_to_info(struct resource_info *, struct drbd_resource *);
3296 int drbd_adm_dump_resources(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3298 struct drbd_genlmsghdr
*dh
;
3299 struct drbd_resource
*resource
;
3300 struct resource_info resource_info
;
3301 struct resource_statistics resource_statistics
;
3306 for_each_resource_rcu(resource
, &drbd_resources
)
3307 if (resource
== (struct drbd_resource
*)cb
->args
[0])
3308 goto found_resource
;
3309 err
= 0; /* resource was probably deleted */
3312 resource
= list_entry(&drbd_resources
,
3313 struct drbd_resource
, resources
);
3316 list_for_each_entry_continue_rcu(resource
, &drbd_resources
, resources
) {
3323 dh
= genlmsg_put(skb
, NETLINK_CB(cb
->skb
).portid
,
3324 cb
->nlh
->nlmsg_seq
, &drbd_genl_family
,
3325 NLM_F_MULTI
, DRBD_ADM_GET_RESOURCES
);
3330 dh
->ret_code
= NO_ERROR
;
3331 err
= nla_put_drbd_cfg_context(skb
, resource
, NULL
, NULL
);
3334 err
= res_opts_to_skb(skb
, &resource
->res_opts
, !capable(CAP_SYS_ADMIN
));
3337 resource_to_info(&resource_info
, resource
);
3338 err
= resource_info_to_skb(skb
, &resource_info
, !capable(CAP_SYS_ADMIN
));
3341 resource_statistics
.res_stat_write_ordering
= resource
->write_ordering
;
3342 err
= resource_statistics_to_skb(skb
, &resource_statistics
, !capable(CAP_SYS_ADMIN
));
3345 cb
->args
[0] = (long)resource
;
3346 genlmsg_end(skb
, dh
);
3356 static void device_to_statistics(struct device_statistics
*s
,
3357 struct drbd_device
*device
)
3359 memset(s
, 0, sizeof(*s
));
3360 s
->dev_upper_blocked
= !may_inc_ap_bio(device
);
3361 if (get_ldev(device
)) {
3362 struct drbd_md
*md
= &device
->ldev
->md
;
3363 u64
*history_uuids
= (u64
*)s
->history_uuids
;
3366 spin_lock_irq(&md
->uuid_lock
);
3367 s
->dev_current_uuid
= md
->uuid
[UI_CURRENT
];
3368 BUILD_BUG_ON(sizeof(s
->history_uuids
) < UI_HISTORY_END
- UI_HISTORY_START
+ 1);
3369 for (n
= 0; n
< UI_HISTORY_END
- UI_HISTORY_START
+ 1; n
++)
3370 history_uuids
[n
] = md
->uuid
[UI_HISTORY_START
+ n
];
3371 for (; n
< HISTORY_UUIDS
; n
++)
3372 history_uuids
[n
] = 0;
3373 s
->history_uuids_len
= HISTORY_UUIDS
;
3374 spin_unlock_irq(&md
->uuid_lock
);
3376 s
->dev_disk_flags
= md
->flags
;
3379 s
->dev_size
= get_capacity(device
->vdisk
);
3380 s
->dev_read
= device
->read_cnt
;
3381 s
->dev_write
= device
->writ_cnt
;
3382 s
->dev_al_writes
= device
->al_writ_cnt
;
3383 s
->dev_bm_writes
= device
->bm_writ_cnt
;
3384 s
->dev_upper_pending
= atomic_read(&device
->ap_bio_cnt
);
3385 s
->dev_lower_pending
= atomic_read(&device
->local_cnt
);
3386 s
->dev_al_suspended
= test_bit(AL_SUSPENDED
, &device
->flags
);
3387 s
->dev_exposed_data_uuid
= device
->ed_uuid
;
static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
{
	if (cb->args[0]) {
		struct drbd_resource *resource =
			(struct drbd_resource *)cb->args[0];
		kref_put(&resource->kref, drbd_destroy_resource);
	}

	return 0;
}

int drbd_adm_dump_devices_done(struct netlink_callback *cb) {
	return put_resource_in_arg0(cb, 7);
}

static void device_to_info(struct device_info *, struct drbd_device *);
3407 int drbd_adm_dump_devices(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3409 struct nlattr
*resource_filter
;
3410 struct drbd_resource
*resource
;
3411 struct drbd_device
*device
;
3412 int minor
, err
, retcode
;
3413 struct drbd_genlmsghdr
*dh
;
3414 struct device_info device_info
;
3415 struct device_statistics device_statistics
;
3416 struct idr
*idr_to_search
;
3418 resource
= (struct drbd_resource
*)cb
->args
[0];
3419 if (!cb
->args
[0] && !cb
->args
[1]) {
3420 resource_filter
= find_cfg_context_attr(cb
->nlh
, T_ctx_resource_name
);
3421 if (resource_filter
) {
3422 retcode
= ERR_RES_NOT_KNOWN
;
3423 resource
= drbd_find_resource(nla_data(resource_filter
));
3426 cb
->args
[0] = (long)resource
;
3431 minor
= cb
->args
[1];
3432 idr_to_search
= resource
? &resource
->devices
: &drbd_devices
;
3433 device
= idr_get_next(idr_to_search
, &minor
);
3438 idr_for_each_entry_continue(idr_to_search
, device
, minor
) {
3440 goto put_result
; /* only one iteration */
3443 goto out
; /* no more devices */
3446 dh
= genlmsg_put(skb
, NETLINK_CB(cb
->skb
).portid
,
3447 cb
->nlh
->nlmsg_seq
, &drbd_genl_family
,
3448 NLM_F_MULTI
, DRBD_ADM_GET_DEVICES
);
3452 dh
->ret_code
= retcode
;
3454 if (retcode
== NO_ERROR
) {
3455 dh
->minor
= device
->minor
;
3456 err
= nla_put_drbd_cfg_context(skb
, device
->resource
, NULL
, device
);
3459 if (get_ldev(device
)) {
3460 struct disk_conf
*disk_conf
=
3461 rcu_dereference(device
->ldev
->disk_conf
);
3463 err
= disk_conf_to_skb(skb
, disk_conf
, !capable(CAP_SYS_ADMIN
));
3468 device_to_info(&device_info
, device
);
3469 err
= device_info_to_skb(skb
, &device_info
, !capable(CAP_SYS_ADMIN
));
3473 device_to_statistics(&device_statistics
, device
);
3474 err
= device_statistics_to_skb(skb
, &device_statistics
, !capable(CAP_SYS_ADMIN
));
3477 cb
->args
[1] = minor
+ 1;
3479 genlmsg_end(skb
, dh
);
int drbd_adm_dump_connections_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 6);
}

enum { SINGLE_RESOURCE, ITERATE_RESOURCES };
3496 int drbd_adm_dump_connections(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3498 struct nlattr
*resource_filter
;
3499 struct drbd_resource
*resource
= NULL
, *next_resource
;
3500 struct drbd_connection
*connection
;
3501 int err
= 0, retcode
;
3502 struct drbd_genlmsghdr
*dh
;
3503 struct connection_info connection_info
;
3504 struct connection_statistics connection_statistics
;
3507 resource
= (struct drbd_resource
*)cb
->args
[0];
3509 resource_filter
= find_cfg_context_attr(cb
->nlh
, T_ctx_resource_name
);
3510 if (resource_filter
) {
3511 retcode
= ERR_RES_NOT_KNOWN
;
3512 resource
= drbd_find_resource(nla_data(resource_filter
));
3515 cb
->args
[0] = (long)resource
;
3516 cb
->args
[1] = SINGLE_RESOURCE
;
3520 if (list_empty(&drbd_resources
))
3522 resource
= list_first_entry(&drbd_resources
, struct drbd_resource
, resources
);
3523 kref_get(&resource
->kref
);
3524 cb
->args
[0] = (long)resource
;
3525 cb
->args
[1] = ITERATE_RESOURCES
;
3530 mutex_lock(&resource
->conf_update
);
3533 for_each_connection_rcu(connection
, resource
)
3534 if (connection
== (struct drbd_connection
*)cb
->args
[2])
3535 goto found_connection
;
3536 /* connection was probably deleted */
3537 goto no_more_connections
;
3539 connection
= list_entry(&resource
->connections
, struct drbd_connection
, connections
);
3542 list_for_each_entry_continue_rcu(connection
, &resource
->connections
, connections
) {
3543 if (!has_net_conf(connection
))
3546 goto put_result
; /* only one iteration */
3549 no_more_connections
:
3550 if (cb
->args
[1] == ITERATE_RESOURCES
) {
3551 for_each_resource_rcu(next_resource
, &drbd_resources
) {
3552 if (next_resource
== resource
)
3553 goto found_resource
;
3555 /* resource was probably deleted */
3560 list_for_each_entry_continue_rcu(next_resource
, &drbd_resources
, resources
) {
3561 mutex_unlock(&resource
->conf_update
);
3562 kref_put(&resource
->kref
, drbd_destroy_resource
);
3563 resource
= next_resource
;
3564 kref_get(&resource
->kref
);
3565 cb
->args
[0] = (long)resource
;
3569 goto out
; /* no more resources */
3572 dh
= genlmsg_put(skb
, NETLINK_CB(cb
->skb
).portid
,
3573 cb
->nlh
->nlmsg_seq
, &drbd_genl_family
,
3574 NLM_F_MULTI
, DRBD_ADM_GET_CONNECTIONS
);
3578 dh
->ret_code
= retcode
;
3580 if (retcode
== NO_ERROR
) {
3581 struct net_conf
*net_conf
;
3583 err
= nla_put_drbd_cfg_context(skb
, resource
, connection
, NULL
);
3586 net_conf
= rcu_dereference(connection
->net_conf
);
3588 err
= net_conf_to_skb(skb
, net_conf
, !capable(CAP_SYS_ADMIN
));
3592 connection_to_info(&connection_info
, connection
);
3593 err
= connection_info_to_skb(skb
, &connection_info
, !capable(CAP_SYS_ADMIN
));
3596 connection_statistics
.conn_congested
= test_bit(NET_CONGESTED
, &connection
->flags
);
3597 err
= connection_statistics_to_skb(skb
, &connection_statistics
, !capable(CAP_SYS_ADMIN
));
3600 cb
->args
[2] = (long)connection
;
3602 genlmsg_end(skb
, dh
);
3608 mutex_unlock(&resource
->conf_update
);
3614 enum mdf_peer_flag
{
3615 MDF_PEER_CONNECTED
= 1 << 0,
3616 MDF_PEER_OUTDATED
= 1 << 1,
3617 MDF_PEER_FENCING
= 1 << 2,
3618 MDF_PEER_FULL_SYNC
= 1 << 3,
3621 static void peer_device_to_statistics(struct peer_device_statistics
*s
,
3622 struct drbd_peer_device
*peer_device
)
3624 struct drbd_device
*device
= peer_device
->device
;
3626 memset(s
, 0, sizeof(*s
));
3627 s
->peer_dev_received
= device
->recv_cnt
;
3628 s
->peer_dev_sent
= device
->send_cnt
;
3629 s
->peer_dev_pending
= atomic_read(&device
->ap_pending_cnt
) +
3630 atomic_read(&device
->rs_pending_cnt
);
3631 s
->peer_dev_unacked
= atomic_read(&device
->unacked_cnt
);
3632 s
->peer_dev_out_of_sync
= drbd_bm_total_weight(device
) << (BM_BLOCK_SHIFT
- 9);
3633 s
->peer_dev_resync_failed
= device
->rs_failed
<< (BM_BLOCK_SHIFT
- 9);
3634 if (get_ldev(device
)) {
3635 struct drbd_md
*md
= &device
->ldev
->md
;
3637 spin_lock_irq(&md
->uuid_lock
);
3638 s
->peer_dev_bitmap_uuid
= md
->uuid
[UI_BITMAP
];
3639 spin_unlock_irq(&md
->uuid_lock
);
3641 (drbd_md_test_flag(device
->ldev
, MDF_CONNECTED_IND
) ?
3642 MDF_PEER_CONNECTED
: 0) +
3643 (drbd_md_test_flag(device
->ldev
, MDF_CONSISTENT
) &&
3644 !drbd_md_test_flag(device
->ldev
, MDF_WAS_UP_TO_DATE
) ?
3645 MDF_PEER_OUTDATED
: 0) +
3646 /* FIXME: MDF_PEER_FENCING? */
3647 (drbd_md_test_flag(device
->ldev
, MDF_FULL_SYNC
) ?
3648 MDF_PEER_FULL_SYNC
: 0);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 9);
}
3658 int drbd_adm_dump_peer_devices(struct sk_buff
*skb
, struct netlink_callback
*cb
)
3660 struct nlattr
*resource_filter
;
3661 struct drbd_resource
*resource
;
3662 struct drbd_device
*device
;
3663 struct drbd_peer_device
*peer_device
= NULL
;
3664 int minor
, err
, retcode
;
3665 struct drbd_genlmsghdr
*dh
;
3666 struct idr
*idr_to_search
;
3668 resource
= (struct drbd_resource
*)cb
->args
[0];
3669 if (!cb
->args
[0] && !cb
->args
[1]) {
3670 resource_filter
= find_cfg_context_attr(cb
->nlh
, T_ctx_resource_name
);
3671 if (resource_filter
) {
3672 retcode
= ERR_RES_NOT_KNOWN
;
3673 resource
= drbd_find_resource(nla_data(resource_filter
));
3677 cb
->args
[0] = (long)resource
;
3681 minor
= cb
->args
[1];
3682 idr_to_search
= resource
? &resource
->devices
: &drbd_devices
;
3683 device
= idr_find(idr_to_search
, minor
);
3688 device
= idr_get_next(idr_to_search
, &minor
);
3695 for_each_peer_device(peer_device
, device
)
3696 if (peer_device
== (struct drbd_peer_device
*)cb
->args
[2])
3697 goto found_peer_device
;
3698 /* peer device was probably deleted */
3701 /* Make peer_device point to the list head (not the first entry). */
3702 peer_device
= list_entry(&device
->peer_devices
, struct drbd_peer_device
, peer_devices
);
3705 list_for_each_entry_continue_rcu(peer_device
, &device
->peer_devices
, peer_devices
) {
3706 if (!has_net_conf(peer_device
->connection
))
3709 goto put_result
; /* only one iteration */
3714 dh
= genlmsg_put(skb
, NETLINK_CB(cb
->skb
).portid
,
3715 cb
->nlh
->nlmsg_seq
, &drbd_genl_family
,
3716 NLM_F_MULTI
, DRBD_ADM_GET_PEER_DEVICES
);
3720 dh
->ret_code
= retcode
;
3722 if (retcode
== NO_ERROR
) {
3723 struct peer_device_info peer_device_info
;
3724 struct peer_device_statistics peer_device_statistics
;
3727 err
= nla_put_drbd_cfg_context(skb
, device
->resource
, peer_device
->connection
, device
);
3730 peer_device_to_info(&peer_device_info
, peer_device
);
3731 err
= peer_device_info_to_skb(skb
, &peer_device_info
, !capable(CAP_SYS_ADMIN
));
3734 peer_device_to_statistics(&peer_device_statistics
, peer_device
);
3735 err
= peer_device_statistics_to_skb(skb
, &peer_device_statistics
, !capable(CAP_SYS_ADMIN
));
3738 cb
->args
[1] = minor
;
3739 cb
->args
[2] = (long)peer_device
;
3741 genlmsg_end(skb
, dh
);
/*
 * Return the connection of @resource if @resource has exactly one connection.
 */
static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
{
	struct list_head *connections = &resource->connections;

	if (list_empty(connections) || connections->next->next != connections)
		return NULL;
	return list_first_entry(&resource->connections, struct drbd_connection, connections);
}
static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
		const struct sib_info *sib)
	struct drbd_resource *resource = device->resource;
	struct state_info *si = NULL; /* for sizeof(si->member); */
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(device);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
		goto nla_put_failure;

		struct disk_conf *disk_conf;

		disk_conf = rcu_dereference(device->ldev->disk_conf);
		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);

		struct net_conf *nc;

		nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
		err = net_conf_to_skb(skb, nc, exclude_sensitive);
		goto nla_put_failure;

	nla = nla_nest_start_noflag(skb, DRBD_NLA_STATE_INFO);
		goto nla_put_failure;
	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
	    nla_put_u32(skb, T_current_state, device->state.i) ||
	    nla_put_u64_0pad(skb, T_ed_uuid, device->ed_uuid) ||
	    nla_put_u64_0pad(skb, T_capacity, get_capacity(device->vdisk)) ||
	    nla_put_u64_0pad(skb, T_send_cnt, device->send_cnt) ||
	    nla_put_u64_0pad(skb, T_recv_cnt, device->recv_cnt) ||
	    nla_put_u64_0pad(skb, T_read_cnt, device->read_cnt) ||
	    nla_put_u64_0pad(skb, T_writ_cnt, device->writ_cnt) ||
	    nla_put_u64_0pad(skb, T_al_writ_cnt, device->al_writ_cnt) ||
	    nla_put_u64_0pad(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
		goto nla_put_failure;

		spin_lock_irq(&device->ldev->md.uuid_lock);
		err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
		spin_unlock_irq(&device->ldev->md.uuid_lock);
			goto nla_put_failure;

		if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
		    nla_put_u64_0pad(skb, T_bits_total, drbd_bm_bits(device)) ||
		    nla_put_u64_0pad(skb, T_bits_oos,
				     drbd_bm_total_weight(device)))
			goto nla_put_failure;
		if (C_SYNC_SOURCE <= device->state.conn &&
		    C_PAUSED_SYNC_T >= device->state.conn) {
			if (nla_put_u64_0pad(skb, T_bits_rs_total,
					     device->rs_total) ||
			    nla_put_u64_0pad(skb, T_bits_rs_failed,
				goto nla_put_failure;

		switch(sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;

	nla_nest_end(skb, nla);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
		nlmsg_free(adm_ctx.reply_skb);
	drbd_adm_finish(&adm_ctx, info, retcode);
static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
	struct drbd_device *device;
	struct drbd_genlmsghdr *dh;
	struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
	struct drbd_resource *resource = NULL;
	struct drbd_resource *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * for_each_resource_safe(resource, tmp, &drbd_resources) {
	 *	connection = "first connection of resource or undefined";
	 *	idr_for_each_entry(&resource->devices, device, i) {
	 * where resource is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the device won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/drbd_destroy_connection() */
	/* revalidate iterator position */
	for_each_resource_rcu(tmp, &drbd_resources) {
		/* first iteration */

	device = idr_get_next(&resource->devices, &volume);
		/* No more volumes to dump on this resource.
		 * Advance resource iterator. */
		pos = list_entry_rcu(resource->resources.next,
				     struct drbd_resource, resources);
		/* Did we dump any volume of this resource yet? */
		/* If we reached the end of the list,
		 * or only a single resource dump was requested,
		 * we are done. */
		if (&pos->resources == &drbd_resources || cb->args[2])

	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, &drbd_genl_family,
			NLM_F_MULTI, DRBD_ADM_GET_STATUS);

		/* This is a connection without a single volume.
		 * Surprisingly enough, it may have a network configuration. */
		struct drbd_connection *connection;

		dh->ret_code = NO_ERROR;
		connection = the_only_connection(resource);
		if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))

			struct net_conf *nc;

			nc = rcu_dereference(connection->net_conf);
			if (nc && net_conf_to_skb(skb, nc, 1) != 0)

		D_ASSERT(device, device->vnr == volume);
		D_ASSERT(device, device->resource == resource);

		dh->minor = device_to_minor(device);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, device, NULL)) {
			genlmsg_cancel(skb, dh);

	genlmsg_end(skb, dh);

	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == resource) ? volume + 1 : 0;

	/* No more resources/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
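
/*
 * Illustrative sketch (not DRBD code): the generic "cursor in cb->args"
 * pattern that get_one_status() builds on.  netlink_dump() keeps calling the
 * dump callback with the same struct netlink_callback, so per-dump iteration
 * state can be parked in cb->args[] between calls; returning 0 ends the dump.
 * The example_* names below are made up for illustration only.
 *
 *	static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
 *	{
 *		unsigned long next = cb->args[0];	// resume position
 *
 *		for (; next < example_nr_objects(); next++)
 *			if (example_put_object(skb, next))
 *				break;			// skb is full
 *		cb->args[0] = next;			// continue here on the next call
 *		return skb->len;			// 0 would terminate the dump
 *	}
 */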
/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	const char *resource_name;
	struct drbd_resource *resource;

	/* Is this a followup call? */
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
		       nlmsg_attrlen(cb->nlh, hdrlen),
		       DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
		return PTR_ERR(nla);
	/* context given, but no name present? */
	resource_name = nla_data(nla);
	if (!*resource_name)
	resource = drbd_find_resource(resource_name);

	kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this connection. */
	cb->args[0] = (long)resource;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)resource;

	return get_one_status(skb, cb);
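
/*
 * Hypothetical userspace sketch (assumes libnl-3/libnl-genl-3; "drbd" is the
 * generic netlink family name and struct drbd_genlmsghdr is the fixed family
 * header, both coming from drbd_genl_api.h).  A status dump is an ordinary
 * NLM_F_DUMP request against DRBD_ADM_GET_STATUS:
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	struct nl_msg *msg = nlmsg_alloc();
 *	struct drbd_genlmsghdr *dhdr;
 *	int family;
 *
 *	genl_connect(sk);
 *	family = genl_ctrl_resolve(sk, "drbd");
 *	dhdr = genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family,
 *			   sizeof(*dhdr), NLM_F_DUMP, DRBD_ADM_GET_STATUS, 0);
 *	dhdr->minor = -1;		// the dump is keyed by resource name, not minor
 *	nl_send_auto(sk, msg);
 *	nl_recvmsgs_default(sk);	// one NLM_F_MULTI reply per device
 *
 * To restrict the dump to one resource, nest a DRBD_NLA_CFG_CONTEXT attribute
 * carrying T_ctx_resource_name into the request; that is exactly what the
 * nla_find()/drbd_nla_find_nested() code above looks for.
 */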
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct timeout_parms tp;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

		adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
		nlmsg_free(adm_ctx.reply_skb);
	drbd_adm_finish(&adm_ctx, info, retcode);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

	device = adm_ctx.device;

	/* resume from last known position, if possible */
	parms.ov_start_sector = device->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));

	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* w_make_ov_request expects position to be aligned */
	device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	device->ov_stop_sector = parms.ov_stop_sector;

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
	drbd_resume_io(device);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
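
/*
 * Worked example of the alignment above, assuming the default 4 KiB bitmap
 * granularity (BM_SECT_PER_BIT == 8 sectors of 512 bytes per bitmap bit):
 * a requested ov_start_sector of 1000005 becomes 1000005 & ~7 = 1000000,
 * so online verify always starts on a bitmap-bit boundary, which is what
 * w_make_ov_request() expects.
 */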
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

	device = adm_ctx.device;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));

	mutex_lock(&adm_ctx.resource->adm_mutex);
	mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;

	/* this is "skip initial sync", assume to be clean */
	if (device->state.conn == C_CONNECTED &&
	    first_peer_device(device)->connection->agreed_pro_version >= 90 &&
	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		drbd_info(device, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (device->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;

	drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
			drbd_err(device, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(first_peer_device(device));
			_drbd_uuid_set(device, UI_BITMAP, 0);
			drbd_print_uuids(device, "cleared bitmap UUID");
			spin_lock_irq(&device->resource->req_lock);
			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
			spin_unlock_irq(&device->resource->req_lock);

	drbd_md_sync(device);

	mutex_unlock(device->state_mutex);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
static enum drbd_ret_code
drbd_check_resource_name(struct drbd_config_context *adm_ctx)
	const char *name = adm_ctx->resource_name;
	if (!name || !name[0]) {
		drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
		return ERR_MANDATORY_TAG;
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
		return ERR_INVALID_REQUEST;
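
/*
 * For illustration: names like "r0" or "web-data" pass this check, an empty
 * or missing name yields ERR_MANDATORY_TAG, and "web/data" is rejected with
 * ERR_INVALID_REQUEST, keeping the name usable as a single
 * sysfs/configfs/debugfs path component later on.
 */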
static void resource_to_info(struct resource_info *info,
			     struct drbd_resource *resource)
	info->res_role = conn_highest_role(first_connection(resource));
	info->res_susp = resource->susp;
	info->res_susp_nod = resource->susp_nod;
	info->res_susp_fen = resource->susp_fen;
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
	struct drbd_connection *connection;
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct res_opts res_opts;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));

	retcode = drbd_check_resource_name(&adm_ctx);
	if (retcode != NO_ERROR)

	if (adm_ctx.resource) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
		/* else: still NO_ERROR */

	/* not yet safe for genl_family.parallel_ops */
	mutex_lock(&resources_mutex);
	connection = conn_create(adm_ctx.resource_name, &res_opts);
	mutex_unlock(&resources_mutex);

		struct resource_info resource_info;

		mutex_lock(&notification_mutex);
		resource_to_info(&resource_info, connection->resource);
		notify_resource_state(NULL, 0, connection->resource,
				      &resource_info, NOTIFY_CREATE);
		mutex_unlock(&notification_mutex);

		retcode = ERR_NOMEM;

	drbd_adm_finish(&adm_ctx, info, retcode);
static void device_to_info(struct device_info *info,
			   struct drbd_device *device)
	info->dev_disk_state = device->state.disk;
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;

	/* drbd_adm_prepare made sure already
	 * that first_peer_device(device)->connection and device->vnr match the request. */
	if (adm_ctx.device) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
		/* else: still NO_ERROR */

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = drbd_create_device(&adm_ctx, dh->minor);
	if (retcode == NO_ERROR) {
		struct drbd_device *device;
		struct drbd_peer_device *peer_device;
		struct device_info info;
		unsigned int peer_devices = 0;
		enum drbd_notification_type flags;

		device = minor_to_device(dh->minor);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))

		device_to_info(&info, device);
		mutex_lock(&notification_mutex);
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
		for_each_peer_device(peer_device, device) {
			struct peer_device_info peer_device_info;

			if (!has_net_conf(peer_device->connection))
			peer_device_to_info(&peer_device_info, peer_device);
			flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
			notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
						 NOTIFY_CREATE | flags);
		mutex_unlock(&notification_mutex);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
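
/*
 * Note on the flags dance above: every notification for this new minor except
 * the last one is sent with NOTIFY_CONTINUES, so a listener can treat the
 * device CREATE event together with its per-peer-device CREATE events as one
 * batch; peer_devices is decremented as each notification goes out and the
 * final one is sent with flags == 0.
 */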
static enum drbd_ret_code
adm_del_minor(struct drbd_device *device)
	struct drbd_peer_device *peer_device;

	if (device->state.disk == D_DISKLESS &&
	    /* no need to be device->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    device->state.role == R_SECONDARY) {
		struct drbd_connection *connection =
			first_connection(device->resource);

		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);

		/* If the state engine hasn't stopped the sender thread yet, we
		 * need to flush the sender work queue before generating the
		 * DESTROY events here. */
		if (get_t_state(&connection->worker) == RUNNING)
			drbd_flush_workqueue(&connection->sender_work);

		mutex_lock(&notification_mutex);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
			notify_peer_device_state(NULL, 0, peer_device, NULL,
						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
		notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
		mutex_unlock(&notification_mutex);

		drbd_delete_device(device);
	return ERR_MINOR_CONFIGURED;
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = adm_del_minor(adm_ctx.device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
static int adm_del_resource(struct drbd_resource *resource)
	struct drbd_connection *connection;

	for_each_connection(connection, resource) {
		if (connection->cstate > C_STANDALONE)
			return ERR_NET_CONFIGURED;
	if (!idr_is_empty(&resource->devices))
		return ERR_RES_IN_USE;

	/* The state engine has stopped the sender thread, so we don't
	 * need to flush the sender work queue before generating the
	 * DESTROY event here. */
	mutex_lock(&notification_mutex);
	notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
	mutex_unlock(&notification_mutex);

	mutex_lock(&resources_mutex);
	list_del_rcu(&resource->resources);
	mutex_unlock(&resources_mutex);
	/* Make sure all threads have actually stopped: state handling only
	 * does drbd_thread_stop_nowait(). */
	list_for_each_entry(connection, &resource->connections, connections)
		drbd_thread_stop(&connection->worker);
	drbd_free_resource(resource);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)

	resource = adm_ctx.resource;
	mutex_lock(&resource->adm_mutex);
	for_each_connection(connection, resource) {
		struct drbd_peer_device *peer_device;

		idr_for_each_entry(&connection->peer_devices, peer_device, i) {
			retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
			if (retcode < SS_SUCCESS) {
				drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");

		retcode = conn_try_disconnect(connection, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");

	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_detach(device, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");

	/* delete volumes */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_del_minor(device);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");

	retcode = adm_del_resource(resource);

	mutex_unlock(&resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
	if (retcode != NO_ERROR)
	resource = adm_ctx.resource;

	mutex_lock(&resource->adm_mutex);
	retcode = adm_del_resource(resource);
	mutex_unlock(&resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);

	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = device_to_minor(device);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, device, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)

	drbd_err(device, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
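
/*
 * Hypothetical userspace sketch for consuming these broadcasts (assumes
 * libnl-3; the multicast group name "events" corresponds to
 * drbd_genl_multicast_events() above):
 *
 *	struct nl_sock *sk = nl_socket_alloc();
 *	int grp;
 *
 *	genl_connect(sk);
 *	grp = genl_ctrl_resolve_grp(sk, "drbd", "events");
 *	nl_socket_add_membership(sk, grp);
 *	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, handle_event, NULL);
 *	for (;;)
 *		nl_recvmsgs_default(sk);	// one DRBD_EVENT message per state change
 *
 * handle_event() is a placeholder callback; the -ESRCH that is ignored above
 * simply means that no listener has joined the group yet.
 */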
static int nla_put_notification_header(struct sk_buff *msg,
				       enum drbd_notification_type type)
	struct drbd_notification_header nh = {

	return drbd_notification_header_to_skb(msg, &nh, true);
void notify_resource_state(struct sk_buff *skb,
			   struct drbd_resource *resource,
			   struct resource_info *resource_info,
			   enum drbd_notification_type type)
	struct resource_statistics resource_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;

	seq = atomic_inc_return(&notify_genl_seq);
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);

	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
		goto nla_put_failure;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     resource_info_to_skb(skb, resource_info, true)))
		goto nla_put_failure;
	resource_statistics.res_stat_write_ordering = resource->write_ordering;
	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)

	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
void notify_device_state(struct sk_buff *skb,
			 struct drbd_device *device,
			 struct device_info *device_info,
			 enum drbd_notification_type type)
	struct device_statistics device_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;

	seq = atomic_inc_return(&notify_genl_seq);
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);

	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
		goto nla_put_failure;
	dh->minor = device->minor;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     device_info_to_skb(skb, device_info, true)))
		goto nla_put_failure;
	device_to_statistics(&device_statistics, device);
	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)

	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
void notify_connection_state(struct sk_buff *skb,
			     struct drbd_connection *connection,
			     struct connection_info *connection_info,
			     enum drbd_notification_type type)
	struct connection_statistics connection_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;

	seq = atomic_inc_return(&notify_genl_seq);
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);

	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
		goto nla_put_failure;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     connection_info_to_skb(skb, connection_info, true)))
		goto nla_put_failure;
	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)

	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
void notify_peer_device_state(struct sk_buff *skb,
			      struct drbd_peer_device *peer_device,
			      struct peer_device_info *peer_device_info,
			      enum drbd_notification_type type)
	struct peer_device_statistics peer_device_statistics;
	struct drbd_resource *resource = peer_device->device->resource;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;

	seq = atomic_inc_return(&notify_genl_seq);
	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);

	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
		goto nla_put_failure;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     peer_device_info_to_skb(skb, peer_device_info, true)))
		goto nla_put_failure;
	peer_device_to_statistics(&peer_device_statistics, peer_device);
	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)

	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
void notify_helper(enum drbd_notification_type type,
		   struct drbd_device *device, struct drbd_connection *connection,
		   const char *name, int status)
	struct drbd_resource *resource = device ? device->resource : connection->resource;
	struct drbd_helper_info helper_info;
	unsigned int seq = atomic_inc_return(&notify_genl_seq);
	struct sk_buff *skb = NULL;
	struct drbd_genlmsghdr *dh;

	strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
	helper_info.helper_status = status;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);

	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
	dh->minor = device ? device->minor : -1;
	dh->ret_code = NO_ERROR;
	mutex_lock(&notification_mutex);
	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
	    nla_put_notification_header(skb, type) ||
	    drbd_helper_info_to_skb(skb, &helper_info, true))
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);

	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
	mutex_unlock(&notification_mutex);

	mutex_unlock(&notification_mutex);

	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
	struct drbd_genlmsghdr *dh;

	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
		goto nla_put_failure;
	dh->ret_code = NO_ERROR;
	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
		goto nla_put_failure;
	genlmsg_end(skb, dh);

	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
static void free_state_changes(struct list_head *list)
	while (!list_empty(list)) {
		struct drbd_state_change *state_change =
			list_first_entry(list, struct drbd_state_change, list);
		list_del(&state_change->list);
		forget_state_change(state_change);
static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
		state_change->n_connections +
		state_change->n_devices +
		state_change->n_devices * state_change->n_connections;
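
/*
 * get_initial_state() below walks a list of drbd_state_change snapshots using
 * the netlink_callback scratch space: cb->args[0] points at the current
 * snapshot, cb->args[2] caches the request sequence number, cb->args[3] holds
 * notifications_for_state_change() of that snapshot, cb->args[4] indexes the
 * next notification within it (NOTIFY_CONTINUES is set while more follow, and
 * reaching cb->args[3] advances to the next snapshot), and cb->args[5] is
 * primed in drbd_adm_get_initial_state() with 2 plus the per-resource
 * notification counts; once it has dropped to 1, the final
 * DRBD_INITIAL_STATE_DONE message is emitted.
 */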
static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
	unsigned int seq = cb->args[2];
	enum drbd_notification_type flags = 0;

	/* There is no need for taking notification_mutex here:  it doesn't
	   matter if the initial state events mix with later state change
	   events; we can always tell the events apart by the NOTIFY_EXISTS
	   flag. */

	if (cb->args[5] == 1) {
		notify_initial_state_done(skb, seq);
	if (cb->args[4] < cb->args[3])
		flags |= NOTIFY_CONTINUES;
	notify_resource_state_change(skb, seq, state_change->resource,
				     NOTIFY_EXISTS | flags);
	if (n < state_change->n_connections) {
		notify_connection_state_change(skb, seq, &state_change->connections[n],
					       NOTIFY_EXISTS | flags);
	n -= state_change->n_connections;
	if (n < state_change->n_devices) {
		notify_device_state_change(skb, seq, &state_change->devices[n],
					   NOTIFY_EXISTS | flags);
	n -= state_change->n_devices;
	if (n < state_change->n_devices * state_change->n_connections) {
		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
						NOTIFY_EXISTS | flags);
	if (cb->args[4] == cb->args[3]) {
		struct drbd_state_change *next_state_change =
			list_entry(state_change->list.next,
				   struct drbd_state_change, list);
		cb->args[0] = (long)next_state_change;
		cb->args[3] = notifications_for_state_change(next_state_change);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
	struct drbd_resource *resource;

	if (cb->args[5] >= 1) {
		if (cb->args[5] > 1)
			return get_initial_state(skb, cb);
			struct drbd_state_change *state_change =
				(struct drbd_state_change *)cb->args[0];

			/* connect list to head */
			list_add(&head, &state_change->list);
			free_state_changes(&head);

	cb->args[5] = 2;  /* number of iterations */
	mutex_lock(&resources_mutex);
	for_each_resource(resource, &drbd_resources) {
		struct drbd_state_change *state_change;

		state_change = remember_old_state(resource, GFP_KERNEL);
		if (!state_change) {
			if (!list_empty(&head))
				free_state_changes(&head);
			mutex_unlock(&resources_mutex);
		copy_old_to_new_state_change(state_change);
		list_add_tail(&state_change->list, &head);
		cb->args[5] += notifications_for_state_change(state_change);
	mutex_unlock(&resources_mutex);

	if (!list_empty(&head)) {
		struct drbd_state_change *state_change =
			list_entry(head.next, struct drbd_state_change, list);
		cb->args[0] = (long)state_change;
		cb->args[3] = notifications_for_state_change(state_change);
		list_del(&head); /* detach list from head */

	cb->args[2] = cb->nlh->nlmsg_seq;
	return get_initial_state(skb, cb);