/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_int.h"	/* core DRBD types (drbd_conf, drbd_tconn, ...) used throughout this file */
#include "drbd_req.h"
#include "drbd_wrappers.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>
/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
/* Configuration is strictly serialized, because generic netlink message
 * processing is strictly serialized by the genl_lock().
 * Which means we can use one static global drbd_config_context struct.
 */
static struct drbd_config_context {
	/* assigned from drbd_genlmsghdr */
	unsigned int minor;
	/* assigned from request attributes, if present */
	unsigned int volume;
#define VOLUME_UNSPECIFIED		(-1U)
	/* pointer into the request skb,
	 * limited lifetime! */
	char *resource_name;
	struct nlattr *my_addr;
	struct nlattr *peer_addr;

	/* reply buffer */
	struct sk_buff *reply_skb;
	/* pointer into reply buffer */
	struct drbd_genlmsghdr *reply_dh;
	/* resolved from attributes, if possible */
	struct drbd_conf *mdev;
	struct drbd_tconn *tconn;
} adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		printk(KERN_ERR "drbd: error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
int drbd_msg_put_info(const char *info)
{
	struct sk_buff *skb = adm_ctx.reply_skb;
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
		unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(&adm_ctx, 0, sizeof(adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx.reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx.reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh = genlmsg_put_reply(adm_ctx.reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx.reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx.reply_dh->minor = d_in->minor;
	adm_ctx.reply_dh->ret_code = NO_ERROR;

	adm_ctx.volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx.reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the global adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx.volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx.resource_name = nla_data(nla);
		adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx.my_addr &&
		     nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) ||
		    (adm_ctx.peer_addr &&
		     nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx.minor = d_in->minor;
	adm_ctx.mdev = minor_to_mdev(d_in->minor);
	adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name);

	if (!adm_ctx.mdev && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info("unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info("unknown resource");
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) {
			drbd_msg_put_info("no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.mdev) {
			drbd_msg_put_info("no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx.my_addr && adm_ctx.peer_addr)
			adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
							  nla_len(adm_ctx.my_addr),
							  nla_data(adm_ctx.peer_addr),
							  nla_len(adm_ctx.peer_addr));
		if (!adm_ctx.tconn) {
			drbd_msg_put_info("unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx.mdev && adm_ctx.tconn &&
	    adm_ctx.mdev->tconn != adm_ctx.tconn) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
				adm_ctx.minor, adm_ctx.resource_name,
				adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx.mdev &&
	    adm_ctx.volume != VOLUME_UNSPECIFIED &&
	    adm_ctx.volume != adm_ctx.mdev->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx.minor, adm_ctx.volume,
				adm_ctx.mdev->vnr, adm_ctx.mdev->tconn->name);
		drbd_msg_put_info("minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx.reply_skb);
	adm_ctx.reply_skb = NULL;
	return err;
}
static int drbd_adm_finish(struct genl_info *info, int retcode)
{
	if (adm_ctx.tconn) {
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);
		adm_ctx.tconn = NULL;
	}

	if (!adm_ctx.reply_skb)
		return -ENOMEM;

	adm_ctx.reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx.reply_skb, info);
	return 0;
}
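
/*
 * Typical handler skeleton built on the two helpers above (this is the
 * pattern the drbd_adm_*() handlers further down all follow):
 *
 *	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;
 *	if (retcode != NO_ERROR)
 *		goto out;
 *	... operate on adm_ctx.mdev / adm_ctx.tconn ...
 * out:
 *	drbd_adm_finish(info, retcode);
 *	return 0;
 */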
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&tconn->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
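
/*
 * Example of what a user mode helper sees in its environment (illustrative
 * values): for an IPv4 peer 10.0.0.2 the two buffers filled in above become
 * "DRBD_PEER_AF=ipv4" and "DRBD_PEER_ADDRESS=10.0.0.2", in addition to the
 * HOME and PATH entries set up by the callers below.
 */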
int drbd_khelper(struct drbd_conf *mdev, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct drbd_tconn *tconn = mdev->tconn;
	struct sib_info sib;
	int ret;

	if (current == tconn->worker.task)
		set_bit(CALLBACK_PENDING, &tconn->flags);

	snprintf(mb, 12, "minor-%d", mdev_to_minor(mdev));
	setup_khelper_env(tconn, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(mdev);

	dev_info(DEV, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(mdev, &sib);
	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		dev_warn(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		dev_info(DEV, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(mdev, &sib);

	if (current == tconn->worker.task)
		clear_bit(CALLBACK_PENDING, &tconn->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
int conn_khelper(struct drbd_tconn *tconn, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *argv[] = {usermode_helper, cmd, tconn->name, NULL };
	int ret;

	setup_khelper_env(tconn, envp);

	conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name);
	/* TODO: conn_bcast_event() ?? */

	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	else
		conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, tconn->name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_conf *mdev;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (get_ldev_if_state(mdev, D_CONSISTENT)) {
			fp = max_t(enum drbd_fencing_p, fp,
				   rcu_dereference(mdev->ldev->disk_conf)->fencing);
			put_ldev(mdev);
		}
	}
	rcu_read_unlock();

	return fp;
}
bool conn_try_outdate_peer(struct drbd_tconn *tconn)
{
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	if (tconn->cstate >= C_WF_REPORT_PARAMS) {
		conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n");
		return false;
	}

	spin_lock_irq(&tconn->req_lock);
	connect_cnt = tconn->connect_cnt;
	spin_unlock_irq(&tconn->req_lock);

	fp = highest_fencing_policy(tconn);
	switch (fp) {
	case FP_NOT_AVAIL:
		conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(tconn, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(tconn) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		conn_warn(tconn, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	conn_info(tconn, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:
	/* Not using
	   conn_request_state(tconn, mask, val, CS_VERBOSE);
	   here, because we might have been able to re-establish the
	   connection in the meantime. */
	spin_lock_irq(&tconn->req_lock);
	if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) {
		if (tconn->connect_cnt != connect_cnt)
			/* In case the connection was established and dropped
			   while the fence-peer handler was running, ignore it */
			conn_info(tconn, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(tconn, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&tconn->req_lock);

	return conn_highest_pdsk(tconn) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_tconn *tconn = (struct drbd_tconn *)data;

	conn_try_outdate_peer(tconn);

	kref_put(&tconn->kref, &conn_destroy);
	return 0;
}
void conn_try_outdate_peer_async(struct drbd_tconn *tconn)
{
	struct task_struct *opa;

	kref_get(&tconn->kref);
	/* We may just have force_sig()'ed this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h");
	if (IS_ERR(opa)) {
		conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&tconn->kref, &conn_destroy);
	}
}
enum drbd_state_rv
drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role, int force)
{
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY)
		request_ping(mdev->tconn); /* Detect a dead peer ASAP */

	mutex_lock(mdev->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state(mdev, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (mdev->state.disk < D_UP_TO_DATE &&
		     mdev->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    mdev->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(mdev->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(mdev->tconn)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(mdev->tconn) && force) {
				dev_warn(DEV, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(mdev->tconn->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(mdev, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		dev_warn(DEV, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(mdev->misc_wait, atomic_read(&mdev->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		set_disk_ro(mdev->vdisk, true);
		if (get_ldev(mdev)) {
			mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(mdev);
		}
	} else {
		mutex_lock(&mdev->tconn->conf_update);
		nc = mdev->tconn->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&mdev->tconn->conf_update);

		set_disk_ro(mdev->vdisk, false);
		if (get_ldev(mdev)) {
			if (((mdev->state.conn < C_CONNECTED ||
			       mdev->state.pdsk <= D_FAILED)
			      && mdev->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(mdev);

			mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(mdev);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (mdev->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(mdev);
		drbd_send_current_state(mdev);
	}

	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(mdev->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.mdev, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.mdev, R_SECONDARY, 0);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | al_offset == 8 |
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *                        | al_offset < 0 |
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 * Activity log size used to be fixed 32kB,
 * but is about to become configurable.
 */
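
/*
 * Worked example for the internal layout (illustrative numbers only):
 * with the default 32 kB activity log (al_size_4k = 8, i.e. 64 sectors)
 * on a 1 TiB backing device, the bitmap needs roughly 32 MiB
 * (one bit per 4 KiB), so md_size_sect ends up as
 *	bitmap sectors (aligned) + 64 (AL) + 8 (4k superblock),
 * al_offset is -64 and bm_offset is -md_size_sect + 8, i.e. everything is
 * addressed relative to the superblock at the end of the device.
 */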
static void drbd_md_set_sector_offsets(struct drbd_conf *mdev,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset   = -md_size_sect + MD_4kB_SECT;
		break;
	}
}
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
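
/*
 * For example: ppsize(buf, 4) yields "4 KB" and ppsize(buf, 1048576) yields
 * "1024 MB"; a value is only shifted (with rounding) into the next larger
 * unit once it reaches 10000 in its current unit.
 */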
/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an mdev->flag, is triggered by drbd internals,
 * and should be short-lived. */
void drbd_suspend_io(struct drbd_conf *mdev)
{
	set_bit(SUSPEND_IO, &mdev->flags);
	if (drbd_suspended(mdev))
		return;
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_bio_cnt));
}

void drbd_resume_io(struct drbd_conf *mdev)
{
	clear_bit(SUSPEND_IO, &mdev->flags);
	wake_up(&mdev->misc_wait);
}
/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @mdev:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_conf *mdev, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	sector_t prev_first_sect, prev_size; /* previous meta location */
	sector_t la_size_sect, u_size;
	struct drbd_md *md = &mdev->ldev->md;
	u32 prev_al_stripe_size_4k;
	u32 prev_al_stripes;
	sector_t size;
	char ppb[10];
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* race:
	 * application request passes inc_ap_bio,
	 * but then cannot get an AL-reference.
	 * this function later may wait on ap_bio_cnt == 0. -> deadlock.
	 *
	 * to avoid that:
	 * Suspend IO right here.
	 * still lock the act_log to not trigger ASSERTs there.
	 */
	drbd_suspend_io(mdev);
	buffer = drbd_md_get_buffer(mdev); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(mdev);
		return DS_ERROR;
	}

	/* no wait necessary anymore, actually we could assert that */
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

	prev_first_sect = drbd_md_first_sector(mdev->ldev);
	prev_size = mdev->ldev->md.md_size_sect;
	la_size_sect = mdev->ldev->md.la_size_sect;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */

		prev_al_stripes = md->al_stripes;
		prev_al_stripe_size_4k = md->al_stripe_size_4k;

		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(mdev, mdev->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(mdev, mdev->ldev, u_size, flags & DDSF_FORCED);

	if (size < la_size_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			dev_warn(DEV, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (drbd_get_capacity(mdev->this_bdev) != size ||
	    drbd_bm_capacity(mdev) != size) {
		int err;
		err = drbd_bm_resize(mdev, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(mdev)>>1;
			if (size == 0) {
				dev_err(DEV, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				dev_err(DEV, "BM resizing failed. "
				    "Leaving size unchanged at size = %lu KB\n",
				    (unsigned long)size);
			}
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(mdev, size);
		mdev->ldev->md.la_size_sect = size;
		dev_info(DEV, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv <= DS_ERROR)
		goto err_out;

	la_size_changed = (la_size_sect != mdev->ldev->md.la_size_sect);

	md_moved = prev_first_sect != drbd_md_first_sector(mdev->ldev)
		|| prev_size	   != mdev->ldev->md.md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		drbd_al_shrink(mdev); /* All extents inactive. */

		prev_flags = md->flags;
		md->flags &= ~MDF_PRIMARY_IND;
		drbd_md_write(mdev, buffer);

		dev_info(DEV, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(mdev, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK);
		drbd_initialize_al(mdev, buffer);

		md->flags = prev_flags;
		drbd_md_write(mdev, buffer);

		if (rs)
			dev_info(DEV, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				 md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > la_size_sect)
		rv = DS_GREW;
	if (size < la_size_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous AL layout on error */
		if (rs) {
			md->al_stripes = prev_al_stripes;
			md->al_stripe_size_4k = prev_al_stripe_size_4k;
			md->al_size_4k = (u64)prev_al_stripes * prev_al_stripe_size_4k;

			drbd_md_set_sector_offsets(mdev, mdev->ldev);
		}
	}
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_md_put_buffer(mdev);
	drbd_resume_io(mdev);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = mdev->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		dev_err(DEV, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			dev_err(DEV, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @mdev:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_conf *mdev, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (mdev->act_log &&
	    mdev->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = mdev->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		dev_err(DEV, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&mdev->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				dev_err(DEV, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		mdev->act_log = n;
	spin_unlock_irq(&mdev->al_lock);
	if (in_use) {
		dev_err(DEV, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		if (t)
			lc_destroy(t);
	}
	drbd_md_mark_dirty(mdev); /* we changed mdev->act_log->nr_elements */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
{
	struct request_queue * const q = mdev->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(mdev->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();
		put_ldev(mdev);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_CACHE_SIZE-1);

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			dev_info(DEV, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
		put_ldev(mdev);
	}
}
void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = mdev->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (get_ldev_if_state(mdev, D_ATTACHING)) {
		local = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
		mdev->local_max_bio_size = local;
		put_ldev(mdev);
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough.
	   Because new from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (mdev->state.conn >= C_CONNECTED) {
		if (mdev->tconn->agreed_pro_version < 94)
			peer = min( mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (mdev->tconn->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (mdev->tconn->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;
	}

	new = min(local, peer);

	if (mdev->state.role == R_PRIMARY && new < now)
		dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		dev_info(DEV, "max BIO size = %u\n", new);

	drbd_setup_queue_param(mdev, new);
}
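
/*
 * Example of the negotiation above: against a pre-8.3.8 peer
 * (agreed_pro_version < 94) "new" is capped at DRBD_MAX_SIZE_H80_PACKET,
 * against drbd 8.3.8 up to (but not including) 8.4.0 at
 * DRBD_MAX_BIO_SIZE_P95, while two 8.4 nodes (agreed_pro_version >= 100)
 * may use the full DRBD_MAX_BIO_SIZE, further limited by the local queue.
 */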
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn)
{
	drbd_thread_start(&tconn->worker);
	conn_flush_workqueue(tconn);
}

/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn)
{
	bool stop_threads;

	spin_lock_irq(&tconn->req_lock);
	stop_threads = conn_all_vols_unconf(tconn) &&
		tconn->cstate == C_STANDALONE;
	spin_unlock_irq(&tconn->req_lock);
	if (stop_threads) {
		/* asender is implicitly stopped by receiver
		 * in conn_disconnect() */
		drbd_thread_stop(&tconn->receiver);
		drbd_thread_stop(&tconn->worker);
	}
}
/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_conf *mdev)
{
	int s = 0;

	if (!lc_try_lock(mdev->act_log)) {
		dev_warn(DEV, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(mdev);
	spin_lock_irq(&mdev->tconn->req_lock);
	if (mdev->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &mdev->flags);
	spin_unlock_irq(&mdev->tconn->req_lock);
	lc_unlock(mdev->act_log);

	if (s)
		dev_info(DEV, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}
static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there are 919 slot numbers of context information per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
		/AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
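
/*
 * Worked example (using the on-disk constants this code was written against,
 * AL_CONTEXT_PER_TRANSACTION == 919 and DRBD_AL_EXTENTS_MAX == 65534):
 * sufficient_on_disk = (65534 + 918) / 919 == 72, the "72 transaction blocks"
 * mentioned above; anything larger is simply capped at DRBD_AL_EXTENTS_MAX.
 */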
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_conf *mdev;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&mdev->tconn->conf_update);
	old_disk_conf = mdev->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail_unlock;
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (new_disk_conf->al_extents > drbd_al_extents_max(mdev->ldev))
		new_disk_conf->al_extents = drbd_al_extents_max(mdev->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != mdev->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			dev_err(DEV, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	drbd_suspend_io(mdev);
	wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));
	drbd_al_shrink(mdev);
	err = drbd_check_al_size(mdev, new_disk_conf);
	lc_unlock(mdev->act_log);
	wake_up(&mdev->al_wait);
	drbd_resume_io(mdev);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(mdev);
	}
	write_unlock_irq(&global_state_lock);

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = mdev->rs_plan_s;
		rcu_assign_pointer(mdev->rs_plan_s, new_plan);
	}

	mutex_unlock(&mdev->tconn->conf_update);

	if (new_disk_conf->al_updates)
		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		mdev->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &mdev->flags);
	else
		set_bit(MD_NO_FUA, &mdev->flags);

	drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);

	drbd_md_sync(mdev);

	if (mdev->state.conn >= C_CONNECTED)
		drbd_send_sync_param(mdev);

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&mdev->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&mdev->tconn->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(mdev);
 out:
	drbd_adm_finish(info, retcode);
	return 0;
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct disk_conf *new_disk_conf = NULL;
	struct block_device *bdev;
	struct lru_cache *resync_lru = NULL;
	struct fifo_buffer *new_plan = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mdev = adm_ctx.mdev;
	conn_reconfig_start(mdev->tconn);

	/* if you want to reconfigure, please tear down first */
	if (mdev->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));

	/* make sure there is no leftover from previous force-detach attempts */
	clear_bit(FORCE_DETACH, &mdev->flags);
	clear_bit(WAS_IO_ERROR, &mdev->flags);
	clear_bit(WAS_READ_ERROR, &mdev->flags);

	/* and no leftover from previously aborted resync or verify, either */
	mdev->rs_total = 0;
	mdev->rs_failed = 0;
	atomic_set(&mdev->rs_pending_cnt, 0);

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	spin_lock_init(&nbc->md.uuid_lock);

	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	nbc->disk_conf = new_disk_conf;

	set_disk_conf_defaults(new_disk_conf);
	err = disk_conf_from_attrs(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
	if (!new_plan) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	write_lock_irq(&global_state_lock);
	retcode = drbd_resync_after_valid(mdev, new_disk_conf->resync_after);
	write_unlock_irq(&global_state_lock);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_read_lock();
	nc = rcu_dereference(mdev->tconn->net_conf);
	if (nc) {
		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();
= blkdev_get_by_path(new_disk_conf
->backing_dev
,
1473 FMODE_READ
| FMODE_WRITE
| FMODE_EXCL
, mdev
);
1475 dev_err(DEV
, "open(\"%s\") failed with %ld\n", new_disk_conf
->backing_dev
,
1477 retcode
= ERR_OPEN_DISK
;
1480 nbc
->backing_bdev
= bdev
;
1483 * meta_dev_idx >= 0: external fixed size, possibly multiple
1484 * drbd sharing one meta device. TODO in that case, paranoia
1485 * check that [md_bdev, meta_dev_idx] is not yet used by some
1486 * other drbd minor! (if you use drbd.conf + drbdadm, that
1487 * should check it for you already; but if you don't, or
1488 * someone fooled it, we need to double check here)
1490 bdev
= blkdev_get_by_path(new_disk_conf
->meta_dev
,
1491 FMODE_READ
| FMODE_WRITE
| FMODE_EXCL
,
1492 (new_disk_conf
->meta_dev_idx
< 0) ?
1493 (void *)mdev
: (void *)drbd_m_holder
);
1495 dev_err(DEV
, "open(\"%s\") failed with %ld\n", new_disk_conf
->meta_dev
,
1497 retcode
= ERR_OPEN_MD_DISK
;
1500 nbc
->md_bdev
= bdev
;
1502 if ((nbc
->backing_bdev
== nbc
->md_bdev
) !=
1503 (new_disk_conf
->meta_dev_idx
== DRBD_MD_INDEX_INTERNAL
||
1504 new_disk_conf
->meta_dev_idx
== DRBD_MD_INDEX_FLEX_INT
)) {
1505 retcode
= ERR_MD_IDX_INVALID
;
1509 resync_lru
= lc_create("resync", drbd_bm_ext_cache
,
1510 1, 61, sizeof(struct bm_extent
),
1511 offsetof(struct bm_extent
, lce
));
1513 retcode
= ERR_NOMEM
;
1517 /* Read our meta data super block early.
1518 * This also sets other on-disk offsets. */
1519 retcode
= drbd_md_read(mdev
, nbc
);
1520 if (retcode
!= NO_ERROR
)
1523 if (new_disk_conf
->al_extents
< DRBD_AL_EXTENTS_MIN
)
1524 new_disk_conf
->al_extents
= DRBD_AL_EXTENTS_MIN
;
1525 if (new_disk_conf
->al_extents
> drbd_al_extents_max(nbc
))
1526 new_disk_conf
->al_extents
= drbd_al_extents_max(nbc
);
1528 if (drbd_get_max_capacity(nbc
) < new_disk_conf
->disk_size
) {
1529 dev_err(DEV
, "max capacity %llu smaller than disk size %llu\n",
1530 (unsigned long long) drbd_get_max_capacity(nbc
),
1531 (unsigned long long) new_disk_conf
->disk_size
);
1532 retcode
= ERR_DISK_TOO_SMALL
;
1536 if (new_disk_conf
->meta_dev_idx
< 0) {
1537 max_possible_sectors
= DRBD_MAX_SECTORS_FLEX
;
1538 /* at least one MB, otherwise it does not make sense */
1539 min_md_device_sectors
= (2<<10);
1541 max_possible_sectors
= DRBD_MAX_SECTORS
;
1542 min_md_device_sectors
= MD_128MB_SECT
* (new_disk_conf
->meta_dev_idx
+ 1);
1545 if (drbd_get_capacity(nbc
->md_bdev
) < min_md_device_sectors
) {
1546 retcode
= ERR_MD_DISK_TOO_SMALL
;
1547 dev_warn(DEV
, "refusing attach: md-device too small, "
1548 "at least %llu sectors needed for this meta-disk type\n",
1549 (unsigned long long) min_md_device_sectors
);
1553 /* Make sure the new disk is big enough
1554 * (we may currently be R_PRIMARY with no local disk...) */
1555 if (drbd_get_max_capacity(nbc
) <
1556 drbd_get_capacity(mdev
->this_bdev
)) {
1557 retcode
= ERR_DISK_TOO_SMALL
;
1561 nbc
->known_size
= drbd_get_capacity(nbc
->backing_bdev
);
1563 if (nbc
->known_size
> max_possible_sectors
) {
1564 dev_warn(DEV
, "==> truncating very big lower level device "
1565 "to currently maximum possible %llu sectors <==\n",
1566 (unsigned long long) max_possible_sectors
);
1567 if (new_disk_conf
->meta_dev_idx
>= 0)
1568 dev_warn(DEV
, "==>> using internal or flexible "
1569 "meta data may help <<==\n");
	drbd_suspend_io(mdev);
	/* also wait for the last barrier ack. */
	/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
	 * We need a way to either ignore barrier acks for barriers sent before a device
	 * was attached, or a way to wait for all pending barrier acks to come in.
	 * As barriers are counted per resource,
	 * we'd need to suspend io on all devices of a resource.
	 */
	wait_event(mdev->misc_wait, !atomic_read(&mdev->ap_pending_cnt) || drbd_suspended(mdev));
	/* and for any other previously queued work */
	drbd_flush_workqueue(mdev);

	rv = _drbd_request_state(mdev, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(mdev);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(mdev, D_ATTACHING))
		goto force_diskless;

	if (!mdev->bitmap) {
		if (drbd_bm_init(mdev)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	if (mdev->state.conn < C_CONNECTED &&
	    mdev->state.role == R_PRIMARY &&
	    (mdev->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		dev_err(DEV, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)mdev->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(mdev, new_disk_conf)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(mdev, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
		dev_warn(DEV, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TOO_SMALL;
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &mdev->flags);
	else
		set_bit(MD_NO_FUA, &mdev->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now mdev takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(mdev->ldev == NULL);
	mdev->ldev = nbc;
	mdev->resync = resync_lru;
	mdev->rs_plan_s = new_plan;
	nbc = NULL;
	resync_lru = NULL;
	new_disk_conf = NULL;
	new_plan = NULL;

	drbd_bump_write_ordering(mdev->tconn, WO_bdev_flush);

	if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &mdev->flags);
	else
		clear_bit(CRASHED_PRIMARY, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !(mdev->state.role == R_PRIMARY && mdev->tconn->susp_nod))
		set_bit(CRASHED_PRIMARY, &mdev->flags);

	drbd_reconsider_max_bio_size(mdev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &mdev->flags);
	if (mdev->state.role != R_PRIMARY &&
	     drbd_md_test_flag(mdev->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &mdev->flags);

	dd = drbd_determine_dev_size(mdev, 0, NULL);
	if (dd <= DS_ERROR) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == DS_GREW)
		set_bit(RESYNC_AFTER_NEG, &mdev->flags);

	if (drbd_md_test_flag(mdev->ldev, MDF_FULL_SYNC) ||
	    (test_bit(CRASHED_PRIMARY, &mdev->flags) &&
	     drbd_md_test_flag(mdev->ldev, MDF_AL_DISABLED))) {
		dev_info(DEV, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(mdev, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(mdev) == drbd_bm_bits(mdev))
		drbd_suspend_al(mdev); /* IO is still suspended here... */

	spin_lock_irq(&mdev->tconn->req_lock);
	os = drbd_read_state(mdev);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(mdev->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(mdev->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(mdev->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	rcu_read_lock();
	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || rcu_dereference(mdev->ldev->disk_conf)->fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	if (rcu_dereference(mdev->ldev->disk_conf)->al_updates)
		mdev->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		mdev->ldev->md.flags |= MDF_AL_DISABLED;
	rcu_read_unlock();

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (mdev->state.conn == C_CONNECTED) {
		mdev->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(mdev->p_uuid);
		mdev->p_uuid = NULL;
	}

	rv = _drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&mdev->tconn->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	mod_timer(&mdev->request_timer, jiffies + HZ);

	if (mdev->state.role == R_PRIMARY)
		mdev->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		mdev->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(mdev);
	drbd_md_sync(mdev);

	kobject_uevent(&disk_to_dev(mdev->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(mdev);
	conn_reconfig_done(mdev->tconn);
	drbd_adm_finish(info, retcode);
	return 0;

 force_diskless_dec:
	put_ldev(mdev);
 force_diskless:
	drbd_force_state(mdev, NS(disk, D_DISKLESS));
	drbd_md_sync(mdev);
 fail:
	conn_reconfig_done(mdev->tconn);
	if (nbc) {
		if (nbc->backing_bdev)
			blkdev_put(nbc->backing_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		if (nbc->md_bdev)
			blkdev_put(nbc->md_bdev,
				   FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		kfree(nbc);
	}
	kfree(new_disk_conf);
	lc_destroy(resync_lru);
	kfree(new_plan);

 finish:
	drbd_adm_finish(info, retcode);
	return 0;
}
static int adm_detach(struct drbd_conf *mdev, int force)
{
	enum drbd_state_rv retcode;
	int ret;

	if (force) {
		set_bit(FORCE_DETACH, &mdev->flags);
		drbd_force_state(mdev, NS(disk, D_FAILED));
		retcode = SS_SUCCESS;
		goto out;
	}

	drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
	drbd_md_get_buffer(mdev); /* make sure there is no in-flight meta-data IO */
	retcode = drbd_request_state(mdev, NS(disk, D_FAILED));
	drbd_md_put_buffer(mdev);
	/* D_FAILED will transition to DISKLESS. */
	ret = wait_event_interruptible(mdev->misc_wait,
			mdev->state.disk != D_FAILED);
	drbd_resume_io(mdev);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
out:
	return retcode;
}
/* Detaching the disk is a process in multiple stages.  First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct detach_parms parms = { };
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
		err = detach_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}

	retcode = adm_detach(adm_ctx.mdev, parms.force_detach);
out:
	drbd_adm_finish(info, retcode);
	return 0;
}
static bool conn_resync_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_SYNC_SOURCE ||
		    mdev->state.conn == C_SYNC_TARGET ||
		    mdev->state.conn == C_PAUSED_SYNC_S ||
		    mdev->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
static bool conn_ov_running(struct drbd_tconn *tconn)
{
	struct drbd_conf *mdev;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, vnr) {
		if (mdev->state.conn == C_VERIFY_S ||
		    mdev->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf)
{
	struct drbd_conf *mdev;
	int i;

	if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) {
		if (new_conf->wire_protocol != old_conf->wire_protocol)
			return ERR_NEED_APV_100;

		if (new_conf->two_primaries != old_conf->two_primaries)
			return ERR_NEED_APV_100;

		if (strcmp(new_conf->integrity_alg, old_conf->integrity_alg))
			return ERR_NEED_APV_100;
	}

	if (!new_conf->two_primaries &&
	    conn_highest_role(tconn) == R_PRIMARY &&
	    conn_highest_peer(tconn) == R_PRIMARY)
		return ERR_NEED_ALLOW_TWO_PRI;

	if (new_conf->two_primaries &&
	    (new_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (get_ldev(mdev)) {
			enum drbd_fencing_p fp = rcu_dereference(mdev->ldev->disk_conf)->fencing;
			put_ldev(mdev);
			if (new_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (mdev->state.role == R_PRIMARY && new_conf->discard_my_data)
			return ERR_DISCARD_IMPOSSIBLE;
	}

	if (new_conf->on_congestion != OC_BLOCK && new_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}
static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf)
{
	static enum drbd_ret_code rv;
	struct drbd_conf *mdev;
	int i;

	rcu_read_lock();
	rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf);
	rcu_read_unlock();

	/* tconn->volumes protected by genl_lock() here */
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		if (!mdev->bitmap) {
			if (drbd_bm_init(mdev))
				return ERR_NOMEM;
		}
	}

	return rv;
}
struct crypto {
	struct crypto_hash *verify_tfm;
	struct crypto_hash *csums_tfm;
	struct crypto_hash *cram_hmac_tfm;
	struct crypto_hash *integrity_tfm;
};

static int
alloc_hash(struct crypto_hash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_hash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;

	rv = alloc_hash(&crypto->csums_tfm, new_conf->csums_alg,
		       ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->verify_tfm, new_conf->verify_alg,
		       ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_hash(&crypto->integrity_tfm, new_conf->integrity_alg,
		       ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_conf->cram_hmac_alg);

		rv = alloc_hash(&crypto->cram_hmac_tfm, hmac_name,
			       ERR_AUTH_ALG);
	}

	return rv;
}
static void free_crypto(struct crypto *crypto)
{
	crypto_free_hash(crypto->cram_hmac_tfm);
	crypto_free_hash(crypto->integrity_tfm);
	crypto_free_hash(crypto->csums_tfm);
	crypto_free_hash(crypto->verify_tfm);
}
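
/*
 * Note: crypto_free_hash() is a no-op for a NULL tfm, so free_crypto() may be
 * called on a struct crypto that alloc_crypto() only partially filled; the
 * error paths in the handlers below rely on that.
 */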
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct net_conf *old_conf, *new_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tconn = adm_ctx.tconn;

	new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(tconn);

	mutex_lock(&tconn->data.mutex);
	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;

	if (!old_conf) {
		drbd_msg_put_info("net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_conf = *old_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs_for_change(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(tconn);
	if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(tconn);
	if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(tconn->net_conf, new_conf);

	if (!rsr) {
		crypto_free_hash(tconn->csums_tfm);
		tconn->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_hash(tconn->verify_tfm);
		tconn->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	crypto_free_hash(tconn->integrity_tfm);
	tconn->integrity_tfm = crypto.integrity_tfm;
	if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100)
		/* Do this without trying to take tconn->data.mutex again.  */
		__drbd_send_protocol(tconn, P_PROTOCOL_UPDATE);

	crypto_free_hash(tconn->cram_hmac_tfm);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	synchronize_rcu();
	kfree(old_conf);

	if (tconn->cstate >= C_WF_REPORT_PARAMS)
		drbd_send_sync_param(minor_to_mdev(conn_lowest_minor(tconn)));

	goto done;

fail:
	mutex_unlock(&tconn->conf_update);
	mutex_unlock(&tconn->data.mutex);
	free_crypto(&crypto);
	kfree(new_conf);
done:
	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
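/*
 * Sketch of the RCU update pattern used above when tconn->net_conf is
 * swapped: publish the new struct with rcu_assign_pointer(), wait for all
 * readers with synchronize_rcu(), then free the old copy.  The struct and
 * identifier names in this sketch are placeholders, not DRBD symbols.
 */
#if 0
struct example_conf { int timeout; };
static struct example_conf __rcu *example_conf_ptr;

static void example_replace_conf(struct example_conf *new_conf)
{
	struct example_conf *old_conf;

	/* updates are assumed to be serialized by some outer lock */
	old_conf = rcu_dereference_protected(example_conf_ptr, 1);
	rcu_assign_pointer(example_conf_ptr, new_conf);
	synchronize_rcu();	/* no reader can still hold a reference to old_conf */
	kfree(old_conf);
}
#endif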
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	struct net_conf *old_conf, *new_conf = NULL;
	struct crypto crypto = { };
	struct drbd_tconn *tconn;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);

	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;
	if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
		drbd_msg_put_info("connection endpoint(s) missing");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	list_for_each_entry(tconn, &drbd_tconns, all_tconn) {
		if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len &&
		    !memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) {
			retcode = ERR_LOCAL_ADDR;
			goto out;
		}

		if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len &&
		    !memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) {
			retcode = ERR_PEER_ADDR;
			goto out;
		}
	}

	tconn = adm_ctx.tconn;
	conn_reconfig_start(tconn);

	if (tconn->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, drbdsetup / netlink process context */
	new_conf = kzalloc(sizeof(*new_conf), GFP_KERNEL);
	if (!new_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_conf);

	err = net_conf_from_attrs(new_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(tconn, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = alloc_crypto(&crypto, new_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	conn_flush_workqueue(tconn);

	mutex_lock(&tconn->conf_update);
	old_conf = tconn->net_conf;
	if (old_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&tconn->conf_update);
		goto fail;
	}
	rcu_assign_pointer(tconn->net_conf, new_conf);

	conn_free_crypto(tconn);
	tconn->cram_hmac_tfm = crypto.cram_hmac_tfm;
	tconn->integrity_tfm = crypto.integrity_tfm;
	tconn->csums_tfm = crypto.csums_tfm;
	tconn->verify_tfm = crypto.verify_tfm;

	tconn->my_addr_len = nla_len(adm_ctx.my_addr);
	memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len);
	tconn->peer_addr_len = nla_len(adm_ctx.peer_addr);
	memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len);

	mutex_unlock(&tconn->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&tconn->volumes, mdev, i) {
		mdev->send_cnt = 0;
		mdev->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(tconn);
	drbd_adm_finish(info, retcode);
	return retcode;

fail:
	free_crypto(&crypto);
	kfree(new_conf);

	conn_reconfig_done(tconn);
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
static enum drbd_state_rv
conn_try_disconnect(struct drbd_tconn *tconn, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);

		if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE);

		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:
		/* no special handling necessary */
		break;
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&adm_ctx.tconn->receiver);

		/* Race breaker.  This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart.  We may have "killed" the receiver thread just
		 * after drbdd_init() returned.  Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			conn_err(tconn,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
	}
	return rv;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct disconnect_parms parms;
	struct drbd_tconn *tconn;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	tconn = adm_ctx.tconn;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	rv = conn_try_disconnect(tconn, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
fail:
	drbd_adm_finish(info, retcode);
	return retcode;
}
void resync_after_online_grow(struct drbd_conf *mdev)
{
	int iass; /* I am sync source */

	dev_info(DEV, "Resync of new storage after online grow\n");
	if (mdev->state.role != mdev->state.peer)
		iass = (mdev->state.role == R_PRIMARY);
	else
		iass = test_bit(RESOLVE_CONFLICTS, &mdev->tconn->flags);

	if (iass)
		drbd_start_resync(mdev, C_SYNC_SOURCE);
	else
		_drbd_request_state(mdev, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
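/*
 * In short: when the two nodes have different roles, the Primary side starts
 * the resync of the grown area as sync source; with equal roles the
 * RESOLVE_CONFLICTS flag negotiated during the handshake breaks the tie, as
 * computed in iass above.
 */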
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	bool change_al_layout = false;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	mdev = adm_ctx.mdev;
	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	memset(&rs, 0, sizeof(struct resize_parms));
	rs.al_stripes = mdev->ldev->md.al_stripes;
	rs.al_stripe_size = mdev->ldev->md.al_stripe_size_4k * 4;
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto fail_ldev;
		}
	}

	if (mdev->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail_ldev;
	}

	if (mdev->state.role == R_SECONDARY &&
	    mdev->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail_ldev;
	}

	if (rs.no_resync && mdev->tconn->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(mdev->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (mdev->ldev->md.al_stripes != rs.al_stripes ||
	    mdev->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
		u32 al_size_k = rs.al_stripes * rs.al_stripe_size;

		if (al_size_k > (16 * 1024 * 1024)) {
			retcode = ERR_MD_LAYOUT_TOO_BIG;
			goto fail_ldev;
		}

		if (al_size_k < MD_32kB_SECT/2) {
			retcode = ERR_MD_LAYOUT_TOO_SMALL;
			goto fail_ldev;
		}

		if (mdev->state.conn != C_CONNECTED) {
			retcode = ERR_MD_LAYOUT_CONNECTED;
			goto fail_ldev;
		}

		change_al_layout = true;
	}

	if (mdev->ldev->known_size != drbd_get_capacity(mdev->ldev->backing_bdev))
		mdev->ldev->known_size = drbd_get_capacity(mdev->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&mdev->tconn->conf_update);
		old_disk_conf = mdev->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(mdev->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&mdev->tconn->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(mdev, ddsf, change_al_layout ? &rs : NULL);
	drbd_md_sync(mdev);
	put_ldev(mdev);
	if (dd == DS_ERROR) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	} else if (dd == DS_ERROR_SPACE_MD) {
		retcode = ERR_MD_LAYOUT_NO_FIT;
		goto fail;
	} else if (dd == DS_ERROR_SHRINK) {
		retcode = ERR_IMPLICIT_SHRINK;
		goto fail;
	}

	if (mdev->state.conn == C_CONNECTED) {
		if (dd == DS_GREW)
			set_bit(RESIZE_PENDING, &mdev->flags);

		drbd_send_uuids(mdev);
		drbd_send_sizes(mdev, 1, ddsf);
	}

fail:
	drbd_adm_finish(info, retcode);
	return retcode;

fail_ldev:
	put_ldev(mdev);
	goto fail;
}
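/*
 * Worked example for the activity-log layout checks above: al_size_k is the
 * requested AL size in KiB, rs.al_stripes * rs.al_stripe_size.  Assuming the
 * usual defaults of 1 stripe of 32k, al_size_k = 1 * 32 = 32, which is
 * exactly the lower bound MD_32kB_SECT/2 and is accepted; anything smaller is
 * rejected with ERR_MD_LAYOUT_TOO_SMALL, and anything above 16*1024*1024 KiB
 * (16 GiB) with ERR_MD_LAYOUT_TOO_BIG.
 */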
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct drbd_tconn *tconn;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;
	tconn = adm_ctx.tconn;

	res_opts = tconn->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto fail;
	}

	err = set_resource_options(tconn, &res_opts);
	if (err) {
		retcode = ERR_INVALID_REQUEST;
		if (err == -ENOMEM)
			retcode = ERR_NOMEM;
	}

fail:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	drbd_flush_workqueue(mdev);

	/* If we happen to be C_STANDALONE R_SECONDARY, just change to
	 * D_INCONSISTENT, and set all bits in the bitmap.  Otherwise,
	 * try to start a resync handshake as sync target for full sync.
	 */
	if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_SECONDARY) {
		retcode = drbd_request_state(mdev, NS(disk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(mdev, &drbd_bmio_set_n_write,
				"set_n_write from invalidate", BM_LOCKED_MASK))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_T));
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = drbd_request_state(adm_ctx.mdev, mask, val);
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
static int drbd_bmio_set_susp_al(struct drbd_conf *mdev)
{
	int rv;

	rv = drbd_bmio_set_n_write(mdev);
	drbd_suspend_al(mdev);
	return rv;
}
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* drbd_ret_code, drbd_state_rv */
	struct drbd_conf *mdev;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for its after_state_ch(). */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	drbd_flush_workqueue(mdev);

	/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
	 * in the bitmap.  Otherwise, try to start a resync handshake
	 * as sync source for full sync.
	 */
	if (mdev->state.conn == C_STANDALONE && mdev->state.role == R_PRIMARY) {
		/* The peer will get a resync upon connect anyways. Just make that
		   into a full resync. */
		retcode = drbd_request_state(mdev, NS(pdsk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(mdev, &drbd_bmio_set_susp_al,
				"set_n_write from invalidate_peer",
				BM_LOCKED_SET_ALLOWED))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(mdev, NS(conn, C_STARTING_SYNC_S));
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (drbd_request_state(adm_ctx.mdev, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.mdev->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}

out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;
	if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
		drbd_uuid_new_current(mdev);
		clear_bit(NEW_CUR_UUID, &mdev->flags);
	}
	drbd_suspend_io(mdev);
	retcode = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (mdev->state.conn < C_CONNECTED)
			tl_clear(mdev->tconn);
		if (mdev->state.disk == D_DISKLESS || mdev->state.disk == D_FAILED)
			tl_restart(mdev->tconn, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(mdev);

out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_tconn *tconn, unsigned vnr)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (vnr != VOLUME_UNSPECIFIED &&
	    nla_put_u32(skb, T_ctx_volume, vnr))
		goto nla_put_failure;
	if (nla_put_string(skb, T_ctx_resource_name, tconn->name))
		goto nla_put_failure;
	if (tconn->my_addr_len &&
	    nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr))
		goto nla_put_failure;
	if (tconn->peer_addr_len &&
	    nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr))
		goto nla_put_failure;
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
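/*
 * Sketch of the nesting pattern used above: a DRBD_NLA_CFG_CONTEXT container
 * is opened with nla_nest_start(), filled with nla_put_*() calls, and either
 * committed with nla_nest_end() or rolled back with nla_nest_cancel() when
 * the skb runs out of room.  The helper name below is hypothetical; the
 * attribute constants are the same ones used in the function above.
 */
#if 0
static int example_put_ctx(struct sk_buff *skb, u32 volume, const char *name)
{
	struct nlattr *nla;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		return -EMSGSIZE;
	if (nla_put_u32(skb, T_ctx_volume, volume) ||
	    nla_put_string(skb, T_ctx_resource_name, name)) {
		/* undo the partially filled nest before giving up */
		nla_nest_cancel(skb, nla);
		return -EMSGSIZE;
	}
	nla_nest_end(skb, nla);
	return 0;
}
#endif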
int nla_put_status_info(struct sk_buff *skb, struct drbd_conf *mdev,
		const struct sib_info *sib)
{
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to.  So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(mdev);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, mdev->tconn, mdev->vnr))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &mdev->tconn->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev) {
		struct disk_conf *disk_conf;

		disk_conf = rcu_dereference(mdev->ldev->disk_conf);
		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
	}
	if (!err) {
		struct net_conf *nc;

		nc = rcu_dereference(mdev->tconn->net_conf);
		if (nc)
			err = net_conf_to_skb(skb, nc, exclude_sensitive);
	}
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
	    nla_put_u32(skb, T_current_state, mdev->state.i) ||
	    nla_put_u64(skb, T_ed_uuid, mdev->ed_uuid) ||
	    nla_put_u64(skb, T_capacity, drbd_get_capacity(mdev->this_bdev)) ||
	    nla_put_u64(skb, T_send_cnt, mdev->send_cnt) ||
	    nla_put_u64(skb, T_recv_cnt, mdev->recv_cnt) ||
	    nla_put_u64(skb, T_read_cnt, mdev->read_cnt) ||
	    nla_put_u64(skb, T_writ_cnt, mdev->writ_cnt) ||
	    nla_put_u64(skb, T_al_writ_cnt, mdev->al_writ_cnt) ||
	    nla_put_u64(skb, T_bm_writ_cnt, mdev->bm_writ_cnt) ||
	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&mdev->ap_bio_cnt)) ||
	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&mdev->ap_pending_cnt)) ||
	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&mdev->rs_pending_cnt)))
		goto nla_put_failure;

	if (got_ldev) {
		int err;

		spin_lock_irq(&mdev->ldev->md.uuid_lock);
		err = nla_put(skb, T_uuids, sizeof(si->uuids), mdev->ldev->md.uuid);
		spin_unlock_irq(&mdev->ldev->md.uuid_lock);

		if (err)
			goto nla_put_failure;

		if (nla_put_u32(skb, T_disk_flags, mdev->ldev->md.flags) ||
		    nla_put_u64(skb, T_bits_total, drbd_bm_bits(mdev)) ||
		    nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(mdev)))
			goto nla_put_failure;
		if (C_SYNC_SOURCE <= mdev->state.conn &&
		    C_PAUSED_SYNC_T >= mdev->state.conn) {
			if (nla_put_u64(skb, T_bits_rs_total, mdev->rs_total) ||
			    nla_put_u64(skb, T_bits_rs_failed, mdev->rs_failed))
				goto nla_put_failure;
		}
	}

	if (sib) {
		switch (sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
			break;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
			/* fall through */
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(mdev);
	return err;
}
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.mdev, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_conf *mdev;
	struct drbd_genlmsghdr *dh;
	struct drbd_tconn *pos = (struct drbd_tconn *)cb->args[0];
	struct drbd_tconn *tconn = NULL;
	struct drbd_tconn *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) {
	 *	idr_for_each_entry(&tconn->volumes, mdev, i) {
	 *	  ...
	 *	}
	 * }
	 * where tconn is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the mdev won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/conn_destroy() */
	rcu_read_lock();
	/* revalidate iterator position */
	list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			tconn = pos;
			break;
		}
		if (tmp == pos) {
			tconn = pos;
			break;
		}
	}
	if (tconn) {
next_tconn:
		mdev = idr_get_next(&tconn->volumes, &volume);
		if (!mdev) {
			/* No more volumes to dump on this tconn.
			 * Advance tconn iterator. */
			pos = list_entry_rcu(tconn->all_tconn.next,
					     struct drbd_tconn, all_tconn);
			/* Did we dump any volume on this tconn yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->all_tconn == &drbd_tconns || cb->args[2])
					goto out;
				volume = 0;
				tconn = pos;
				goto next_tconn;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!mdev) {
			/* This is a tconn without a single volume.
			 * Surprisingly enough, it may have a network
			 * configuration. */
			struct net_conf *nc;
			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED))
				goto cancel;
			nc = rcu_dereference(tconn->net_conf);
			if (nc && net_conf_to_skb(skb, nc, 1) != 0)
				goto cancel;
			goto done;
		}

		D_ASSERT(mdev->vnr == volume);
		D_ASSERT(mdev->tconn == tconn);

		dh->minor = mdev_to_minor(mdev);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, mdev, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == tconn) ? volume + 1 : 0;

	/* No more tconns/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}
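/*
 * Sketch of the netlink dump contract relied on above: the dump callback is
 * invoked repeatedly, once per reply skb, and cb->args[] is the only state
 * that persists between invocations.  Returning the skb length means "call
 * me again"; returning 0 (an empty skb) terminates the dump.  The callback
 * below is purely illustrative and not a DRBD function.
 */
#if 0
static int example_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	long cursor = cb->args[0];	/* where the previous call stopped */

	if (cursor >= 3)
		return 0;		/* nothing left: end of dump */

	/* ... put one NLM_F_MULTI message for item "cursor" into skb ... */

	cb->args[0] = cursor + 1;	/* remember position for the next call */
	return skb->len;
}
#endif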
/*
 * Request status of all resources, or of all volumes within a single
 * resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_tconn *tconn;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	if (!nla)
		return -EINVAL; /* context given, but no name present? */
	resource_name = nla_data(nla);
	tconn = conn_get_by_name(resource_name);
	if (!tconn)
		return -ENODEV;

	kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this tconn. */
	cb->args[0] = (long)tconn;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)tconn;

dump:
	return get_one_status(skb, cb);
}
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.mdev->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.mdev->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mdev = adm_ctx.mdev;

	/* resume from last known position, if possible */
	parms.ov_start_sector = mdev->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out;
		}
	}
	/* w_make_ov_request expects position to be aligned */
	mdev->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	mdev->ov_stop_sector = parms.ov_stop_sector;

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(mdev);
	wait_event(mdev->misc_wait, !test_bit(BITMAP_IO, &mdev->flags));
	retcode = drbd_request_state(mdev, NS(conn, C_VERIFY_S));
	drbd_resume_io(mdev);
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_conf *mdev;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	mdev = adm_ctx.mdev;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(mdev->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(mdev)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (mdev->state.conn == C_CONNECTED && mdev->tconn->agreed_pro_version >= 90 &&
	    mdev->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		dev_info(DEV, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (mdev->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(mdev, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(mdev); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(mdev, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			dev_err(DEV, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(mdev);
			_drbd_uuid_set(mdev, UI_BITMAP, 0);
			drbd_print_uuids(mdev, "cleared bitmap UUID");
			spin_lock_irq(&mdev->tconn->req_lock);
			_drbd_set_state(_NS2(mdev, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&mdev->tconn->req_lock);
		}
	}

	drbd_md_sync(mdev);
out_dec:
	put_ldev(mdev);
out:
	mutex_unlock(mdev->state_mutex);
out_nolock:
	drbd_adm_finish(info, retcode);
	return retcode;
}
static enum drbd_ret_code
drbd_check_resource_name(const char *name)
{
	if (!name || !name[0]) {
		drbd_msg_put_info("resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info("invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}
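/*
 * Example: a name like "r0" is accepted, while an empty name or "backup/r0"
 * is refused, since a '/' would make the name unusable as a future
 * sysfs/configfs/debugfs directory entry.
 */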
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(adm_ctx.resource_name);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.tconn) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info("resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	if (!conn_create(adm_ctx.resource_name, &res_opts))
		retcode = ERR_NOMEM;
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info("requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info("requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that mdev->tconn and mdev->vnr match the request. */
	if (adm_ctx.mdev) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume);
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
static enum drbd_ret_code adm_delete_minor(struct drbd_conf *mdev)
{
	if (mdev->state.disk == D_DISKLESS &&
	    /* no need to be mdev->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    mdev->state.role == R_SECONDARY) {
		_drbd_request_state(mdev, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);
		idr_remove(&mdev->tconn->volumes, mdev->vnr);
		idr_remove(&minors, mdev_to_minor(mdev));
		destroy_workqueue(mdev->submit.wq);
		del_gendisk(mdev->vdisk);
		synchronize_rcu();
		kref_put(&mdev->kref, &drbd_minor_destroy);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}
int drbd_adm_delete_minor(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	retcode = adm_delete_minor(adm_ctx.mdev);
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	struct drbd_conf *mdev;
	unsigned i;

	retcode = drbd_adm_prepare(skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (!adm_ctx.tconn) {
		retcode = ERR_RES_NOT_KNOWN;
		goto out;
	}

	/* demote */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = drbd_set_role(mdev, R_SECONDARY, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info("failed to demote");
			goto out;
		}
	}

	retcode = conn_try_disconnect(adm_ctx.tconn, 0);
	if (retcode < SS_SUCCESS) {
		drbd_msg_put_info("failed to disconnect");
		goto out;
	}

	/* detach */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_detach(mdev, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info("failed to detach");
			goto out;
		}
	}

	/* If we reach this, all volumes (of this tconn) are Secondary,
	 * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
	 * actually stopped, state handling only does drbd_thread_stop_nowait(). */
	drbd_thread_stop(&adm_ctx.tconn->worker);

	/* Now, nothing can fail anymore */

	/* delete volumes */
	idr_for_each_entry(&adm_ctx.tconn->volumes, mdev, i) {
		retcode = adm_delete_minor(mdev);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info("failed to delete volume");
			goto out;
		}
	}

	/* delete connection */
	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		/* "can not happen" */
		retcode = ERR_RES_IN_USE;
		drbd_msg_put_info("failed to delete connection");
	}
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (conn_lowest_minor(adm_ctx.tconn) < 0) {
		list_del_rcu(&adm_ctx.tconn->all_tconn);
		synchronize_rcu();
		kref_put(&adm_ctx.tconn->kref, &conn_destroy);

		retcode = NO_ERROR;
	} else {
		retcode = ERR_RES_IN_USE;
	}

	if (retcode == NO_ERROR)
		drbd_thread_stop(&adm_ctx.tconn->worker);
out:
	drbd_adm_finish(info, retcode);
	return retcode;
}
void drbd_bcast_event(struct drbd_conf *mdev, const struct sib_info *sib)
{
	static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	if (sib->sib_reason == SIB_SYNC_PROGRESS) {
		/* rate limit sync-progress broadcasts to one per second */
		if (time_after(jiffies, mdev->rs_last_bcast + HZ))
			mdev->rs_last_bcast = jiffies;
		else
			return;
	}

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = mdev_to_minor(mdev);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, mdev, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, 0);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	dev_err(DEV, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}