/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/blkpg.h>
#include <linux/cpumask.h>
#include "drbd_protocol.h"
#include "drbd_state_change.h"
#include <asm/unaligned.h>
#include <linux/drbd_limits.h>
#include <linux/kthread.h>

#include <net/genetlink.h>
// int drbd_adm_create_resource(struct sk_buff *skb, struct genl_info *info);
// int drbd_adm_delete_resource(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info);
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info);

int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_devices_done(struct netlink_callback *cb);
int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_connections_done(struct netlink_callback *cb);
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb);
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb);
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb);
#include <linux/drbd_genl_api.h>
#include <linux/genl_magic_func.h>

static atomic_t drbd_genl_seq = ATOMIC_INIT(2); /* two. */
static atomic_t notify_genl_seq = ATOMIC_INIT(2); /* two. */

DEFINE_MUTEX(notification_mutex);

/* used blkdev_get_by_path, to claim our meta data device(s) */
static char *drbd_m_holder = "Hands off! this is DRBD's meta data device.";
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
{
	genlmsg_end(skb, genlmsg_data(nlmsg_data(nlmsg_hdr(skb))));
	if (genlmsg_reply(skb, info))
		pr_err("error sending genl reply\n");
}
/* Used on a fresh "drbd_adm_prepare"d reply_skb, this cannot fail: The only
 * reason it could fail was no space in skb, and there are 4k available. */
static int drbd_msg_put_info(struct sk_buff *skb, const char *info)
{
	struct nlattr *nla;
	int err = -EMSGSIZE;

	if (!info || !info[0])
		return 0;

	nla = nla_nest_start(skb, DRBD_NLA_CFG_REPLY);
	if (!nla)
		return err;

	err = nla_put_string(skb, T_info_text, info);
	if (err) {
		nla_nest_cancel(skb, nla);
		return err;
	} else
		nla_nest_end(skb, nla);
	return 0;
}
/* This would be a good candidate for a "pre_doit" hook,
 * and per-family private info->pointers.
 * But we need to stay compatible with older kernels.
 * If it returns successfully, adm_ctx members are valid.
 *
 * At this point, we still rely on the global genl_lock().
 * If we want to avoid that, and allow "genl_family.parallel_ops", we may need
 * to add additional synchronization against object destruction/modification.
 */
#define DRBD_ADM_NEED_MINOR	1
#define DRBD_ADM_NEED_RESOURCE	2
#define DRBD_ADM_NEED_CONNECTION 4
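/* Illustration (this mirrors how the .doit handlers further down use these
 * flags): a handler that operates on a specific minor starts with
 *
 *	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
 *	if (!adm_ctx.reply_skb)
 *		return retcode;
 *
 * so that adm_ctx.device is known to be valid (or an error reply is already
 * prepared).  DRBD_ADM_NEED_RESOURCE and DRBD_ADM_NEED_CONNECTION work the
 * same way for adm_ctx.resource and adm_ctx.connection. */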
static int drbd_adm_prepare(struct drbd_config_context *adm_ctx,
	struct sk_buff *skb, struct genl_info *info, unsigned flags)
{
	struct drbd_genlmsghdr *d_in = info->userhdr;
	const u8 cmd = info->genlhdr->cmd;
	int err;

	memset(adm_ctx, 0, sizeof(*adm_ctx));

	/* genl_rcv_msg only checks for CAP_NET_ADMIN on "GENL_ADMIN_PERM" :( */
	if (cmd != DRBD_ADM_GET_STATUS && !capable(CAP_NET_ADMIN))
		return -EPERM;

	adm_ctx->reply_skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
	if (!adm_ctx->reply_skb) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh = genlmsg_put_reply(adm_ctx->reply_skb,
					info, &drbd_genl_family, 0, cmd);
	/* put of a few bytes into a fresh skb of >= 4k will always succeed.
	 * but anyways */
	if (!adm_ctx->reply_dh) {
		err = -ENOMEM;
		goto fail;
	}

	adm_ctx->reply_dh->minor = d_in->minor;
	adm_ctx->reply_dh->ret_code = NO_ERROR;

	adm_ctx->volume = VOLUME_UNSPECIFIED;
	if (info->attrs[DRBD_NLA_CFG_CONTEXT]) {
		struct nlattr *nla;
		/* parse and validate only */
		err = drbd_cfg_context_from_attrs(NULL, info);
		if (err)
			goto fail;

		/* It was present, and valid,
		 * copy it over to the reply skb. */
		err = nla_put_nohdr(adm_ctx->reply_skb,
				info->attrs[DRBD_NLA_CFG_CONTEXT]->nla_len,
				info->attrs[DRBD_NLA_CFG_CONTEXT]);
		if (err)
			goto fail;

		/* and assign stuff to the adm_ctx */
		nla = nested_attr_tb[__nla_type(T_ctx_volume)];
		if (nla)
			adm_ctx->volume = nla_get_u32(nla);
		nla = nested_attr_tb[__nla_type(T_ctx_resource_name)];
		if (nla)
			adm_ctx->resource_name = nla_data(nla);
		adm_ctx->my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
		adm_ctx->peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
		if ((adm_ctx->my_addr &&
		     nla_len(adm_ctx->my_addr) > sizeof(adm_ctx->connection->my_addr)) ||
		    (adm_ctx->peer_addr &&
		     nla_len(adm_ctx->peer_addr) > sizeof(adm_ctx->connection->peer_addr))) {
			err = -EINVAL;
			goto fail;
		}
	}

	adm_ctx->minor = d_in->minor;
	adm_ctx->device = minor_to_device(d_in->minor);

	/* We are protected by the global genl_lock().
	 * But we may explicitly drop it/retake it in drbd_adm_set_role(),
	 * so make sure this object stays around. */
	if (adm_ctx->device)
		kref_get(&adm_ctx->device->kref);

	if (adm_ctx->resource_name) {
		adm_ctx->resource = drbd_find_resource(adm_ctx->resource_name);
	}

	if (!adm_ctx->device && (flags & DRBD_ADM_NEED_MINOR)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown minor");
		return ERR_MINOR_INVALID;
	}
	if (!adm_ctx->resource && (flags & DRBD_ADM_NEED_RESOURCE)) {
		drbd_msg_put_info(adm_ctx->reply_skb, "unknown resource");
		if (adm_ctx->resource_name)
			return ERR_RES_NOT_KNOWN;
		return ERR_INVALID_REQUEST;
	}

	if (flags & DRBD_ADM_NEED_CONNECTION) {
		if (adm_ctx->resource) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no resource name expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->device) {
			drbd_msg_put_info(adm_ctx->reply_skb, "no minor number expected");
			return ERR_INVALID_REQUEST;
		}
		if (adm_ctx->my_addr && adm_ctx->peer_addr)
			adm_ctx->connection = conn_get_by_addrs(nla_data(adm_ctx->my_addr),
							nla_len(adm_ctx->my_addr),
							nla_data(adm_ctx->peer_addr),
							nla_len(adm_ctx->peer_addr));
		if (!adm_ctx->connection) {
			drbd_msg_put_info(adm_ctx->reply_skb, "unknown connection");
			return ERR_INVALID_REQUEST;
		}
	}

	/* some more paranoia, if the request was over-determined */
	if (adm_ctx->device && adm_ctx->resource &&
	    adm_ctx->device->resource != adm_ctx->resource) {
		pr_warning("request: minor=%u, resource=%s; but that minor belongs to resource %s\n",
				adm_ctx->minor, adm_ctx->resource->name,
				adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists in different resource");
		return ERR_INVALID_REQUEST;
	}
	if (adm_ctx->device &&
	    adm_ctx->volume != VOLUME_UNSPECIFIED &&
	    adm_ctx->volume != adm_ctx->device->vnr) {
		pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
				adm_ctx->minor, adm_ctx->volume,
				adm_ctx->device->vnr,
				adm_ctx->device->resource->name);
		drbd_msg_put_info(adm_ctx->reply_skb, "minor exists as different volume");
		return ERR_INVALID_REQUEST;
	}

	/* still, provide adm_ctx->resource always, if possible. */
	if (!adm_ctx->resource) {
		adm_ctx->resource = adm_ctx->device ? adm_ctx->device->resource
			: adm_ctx->connection ? adm_ctx->connection->resource : NULL;
		if (adm_ctx->resource)
			kref_get(&adm_ctx->resource->kref);
	}

	return NO_ERROR;

fail:
	nlmsg_free(adm_ctx->reply_skb);
	adm_ctx->reply_skb = NULL;
	return err;
}
static int drbd_adm_finish(struct drbd_config_context *adm_ctx,
	struct genl_info *info, int retcode)
{
	if (adm_ctx->device) {
		kref_put(&adm_ctx->device->kref, drbd_destroy_device);
		adm_ctx->device = NULL;
	}
	if (adm_ctx->connection) {
		kref_put(&adm_ctx->connection->kref, &drbd_destroy_connection);
		adm_ctx->connection = NULL;
	}
	if (adm_ctx->resource) {
		kref_put(&adm_ctx->resource->kref, drbd_destroy_resource);
		adm_ctx->resource = NULL;
	}

	if (!adm_ctx->reply_skb)
		return -ENOMEM;

	adm_ctx->reply_dh->ret_code = retcode;
	drbd_adm_send_reply(adm_ctx->reply_skb, info);
	return 0;
}
static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{
	char *afs;

	/* FIXME: A future version will not allow this case. */
	if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
		return;

	switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
	case AF_INET6:
		afs = "ipv6";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
			 &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
		break;
	case AF_INET:
		afs = "ipv4";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
		break;
	default:
		afs = "ssocks";
		snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
			 &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
	}
	snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
}
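/* For illustration only (the address below is an example, not taken from
 * this file): with an IPv4 peer at 192.0.2.1 the resulting helper
 * environment slots are
 *	envp[3] == "DRBD_PEER_AF=ipv4"
 *	envp[4] == "DRBD_PEER_ADDRESS=192.0.2.1"
 * while an unconfigured peer address leaves both slots empty. */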
int drbd_khelper(struct drbd_device *device, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char mb[12];
	char *argv[] = {usermode_helper, cmd, mb, NULL };
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct sib_info sib;
	int ret;

	if (current == connection->worker.task)
		set_bit(CALLBACK_PENDING, &connection->flags);

	snprintf(mb, 12, "minor-%d", device_to_minor(device));
	setup_khelper_env(connection, envp);

	/* The helper may take some time.
	 * write out any unsynced meta data changes now */
	drbd_md_sync(device);

	drbd_info(device, "helper command: %s %s %s\n", usermode_helper, cmd, mb);
	sib.sib_reason = SIB_HELPER_PRE;
	sib.helper_name = cmd;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_CALL, device, connection, cmd, 0);
	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	else
		drbd_info(device, "helper command: %s %s %s exit code %u (0x%x)\n",
				usermode_helper, cmd, mb,
				(ret >> 8) & 0xff, ret);
	sib.sib_reason = SIB_HELPER_POST;
	sib.helper_exit_code = ret;
	drbd_bcast_event(device, &sib);
	notify_helper(NOTIFY_RESPONSE, device, connection, cmd, ret);

	if (current == connection->worker.task)
		clear_bit(CALLBACK_PENDING, &connection->flags);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static int conn_khelper(struct drbd_connection *connection, char *cmd)
{
	char *envp[] = { "HOME=/",
			"TERM=linux",
			"PATH=/sbin:/usr/sbin:/bin:/usr/bin",
			 (char[20]) { }, /* address family */
			 (char[60]) { }, /* address */
			NULL };
	char *resource_name = connection->resource->name;
	char *argv[] = {usermode_helper, cmd, resource_name, NULL };
	int ret;

	setup_khelper_env(connection, envp);
	conn_md_sync(connection);

	drbd_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, resource_name);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_CALL, NULL, connection, cmd, 0);

	ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
	if (ret)
		drbd_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	else
		drbd_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
			  usermode_helper, cmd, resource_name,
			  (ret >> 8) & 0xff, ret);
	/* TODO: conn_bcast_event() ?? */
	notify_helper(NOTIFY_RESPONSE, NULL, connection, cmd, ret);

	if (ret < 0) /* Ignore any ERRNOs we got. */
		ret = 0;

	return ret;
}
static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{
	enum drbd_fencing_p fp = FP_NOT_AVAIL;
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev_if_state(device, D_CONSISTENT)) {
			struct disk_conf *disk_conf =
				rcu_dereference(peer_device->device->ldev->disk_conf);
			fp = max_t(enum drbd_fencing_p, fp, disk_conf->fencing);
			put_ldev(device);
		}
	}
	rcu_read_unlock();

	if (fp == FP_NOT_AVAIL) {
		/* IO Suspending works on the whole resource.
		   Do it only for one device. */
		vnr = 0;
		peer_device = idr_get_next(&connection->peer_devices, &vnr);
		drbd_change_state(peer_device->device, CS_VERBOSE | CS_HARD, NS(susp_fen, 0));
	}

	return fp;
}
bool conn_try_outdate_peer(struct drbd_connection *connection)
{
	unsigned int connect_cnt;
	union drbd_state mask = { };
	union drbd_state val = { };
	enum drbd_fencing_p fp;
	char *ex_to_string;
	int r;

	spin_lock_irq(&connection->resource->req_lock);
	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		drbd_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
		spin_unlock_irq(&connection->resource->req_lock);
		return false;
	}

	connect_cnt = connection->connect_cnt;
	spin_unlock_irq(&connection->resource->req_lock);

	fp = highest_fencing_policy(connection);
	switch (fp) {
	case FP_NOT_AVAIL:
		drbd_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
		goto out;
	case FP_DONT_CARE:
		return true;
	default: ;
	}

	r = conn_khelper(connection, "fence-peer");

	switch ((r>>8) & 0xff) {
	case 3: /* peer is inconsistent */
		ex_to_string = "peer is inconsistent or worse";
		mask.pdsk = D_MASK;
		val.pdsk = D_INCONSISTENT;
		break;
	case 4: /* peer got outdated, or was already outdated */
		ex_to_string = "peer was fenced";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	case 5: /* peer was down */
		if (conn_highest_disk(connection) == D_UP_TO_DATE) {
			/* we will(have) create(d) a new UUID anyways... */
			ex_to_string = "peer is unreachable, assumed to be dead";
			mask.pdsk = D_MASK;
			val.pdsk = D_OUTDATED;
		} else {
			ex_to_string = "peer unreachable, doing nothing since disk != UpToDate";
		}
		break;
	case 6: /* Peer is primary, voluntarily outdate myself.
		 * This is useful when an unconnected R_SECONDARY is asked to
		 * become R_PRIMARY, but finds the other peer being active. */
		ex_to_string = "peer is active";
		drbd_warn(connection, "Peer is primary, outdating myself.\n");
		mask.disk = D_MASK;
		val.disk = D_OUTDATED;
		break;
	case 7:
		if (fp != FP_STONITH)
			drbd_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
		ex_to_string = "peer was stonithed";
		mask.pdsk = D_MASK;
		val.pdsk = D_OUTDATED;
		break;
	default:
		/* The script is broken ... */
		drbd_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
		return false; /* Eventually leave IO frozen */
	}

	drbd_info(connection, "fence-peer helper returned %d (%s)\n",
		  (r>>8) & 0xff, ex_to_string);

 out:
	/* Not using
	   conn_request_state(connection, mask, val, CS_VERBOSE);
	   here, because we might were able to re-establish the connection in the
	   meantime. */
	spin_lock_irq(&connection->resource->req_lock);
	if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
		if (connection->connect_cnt != connect_cnt)
			/* In case the connection was established and droped
			   while the fence-peer handler was running, ignore it */
			drbd_info(connection, "Ignoring fence-peer exit code\n");
		else
			_conn_request_state(connection, mask, val, CS_VERBOSE);
	}
	spin_unlock_irq(&connection->resource->req_lock);

	return conn_highest_pdsk(connection) <= D_OUTDATED;
}
static int _try_outdate_peer_async(void *data)
{
	struct drbd_connection *connection = (struct drbd_connection *)data;

	conn_try_outdate_peer(connection);

	kref_put(&connection->kref, drbd_destroy_connection);
	return 0;
}
void conn_try_outdate_peer_async(struct drbd_connection *connection)
{
	struct task_struct *opa;

	kref_get(&connection->kref);
	/* We may just have force_sig()'ed this thread
	 * to get it out of some blocking network function.
	 * Clear signals; otherwise kthread_run(), which internally uses
	 * wait_on_completion_killable(), will mistake our pending signal
	 * for a new fatal signal and fail. */
	flush_signals(current);
	opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
	if (IS_ERR(opa)) {
		drbd_err(connection, "out of mem, failed to invoke fence-peer helper\n");
		kref_put(&connection->kref, drbd_destroy_connection);
	}
}
enum drbd_state_rv
drbd_set_role(struct drbd_device *const device, enum drbd_role new_role, int force)
{
	struct drbd_peer_device *const peer_device = first_peer_device(device);
	struct drbd_connection *const connection = peer_device ? peer_device->connection : NULL;
	const int max_tries = 4;
	enum drbd_state_rv rv = SS_UNKNOWN_ERROR;
	struct net_conf *nc;
	int try = 0;
	int forced = 0;
	union drbd_state mask, val;

	if (new_role == R_PRIMARY) {
		struct drbd_connection *connection;

		/* Detect dead peers as soon as possible.  */

		rcu_read_lock();
		for_each_connection(connection, device->resource)
			request_ping(connection);
		rcu_read_unlock();
	}

	mutex_lock(device->state_mutex);

	mask.i = 0; mask.role = R_MASK;
	val.i  = 0; val.role  = new_role;

	while (try++ < max_tries) {
		rv = _drbd_request_state_holding_state_mutex(device, mask, val, CS_WAIT_COMPLETE);

		/* in case we first succeeded to outdate,
		 * but now suddenly could establish a connection */
		if (rv == SS_CW_FAILED_BY_PEER && mask.pdsk != 0) {
			val.pdsk = 0;
			mask.pdsk = 0;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK && force &&
		    (device->state.disk < D_UP_TO_DATE &&
		     device->state.disk >= D_INCONSISTENT)) {
			mask.disk = D_MASK;
			val.disk  = D_UP_TO_DATE;
			forced = 1;
			continue;
		}

		if (rv == SS_NO_UP_TO_DATE_DISK &&
		    device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
			D_ASSERT(device, device->state.pdsk == D_UNKNOWN);

			if (conn_try_outdate_peer(connection)) {
				val.disk = D_UP_TO_DATE;
				mask.disk = D_MASK;
			}
			continue;
		}

		if (rv == SS_NOTHING_TO_DO)
			goto out;
		if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
			if (!conn_try_outdate_peer(connection) && force) {
				drbd_warn(device, "Forced into split brain situation!\n");
				mask.pdsk = D_MASK;
				val.pdsk  = D_OUTDATED;
			}
			continue;
		}
		if (rv == SS_TWO_PRIMARIES) {
			/* Maybe the peer is detected as dead very soon...
			   retry at most once more in this case. */
			int timeo;
			rcu_read_lock();
			nc = rcu_dereference(connection->net_conf);
			timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
			rcu_read_unlock();
			schedule_timeout_interruptible(timeo);
			if (try < max_tries)
				try = max_tries - 1;
			continue;
		}
		if (rv < SS_SUCCESS) {
			rv = _drbd_request_state(device, mask, val,
						CS_VERBOSE + CS_WAIT_COMPLETE);
			if (rv < SS_SUCCESS)
				goto out;
		}
		break;
	}

	if (rv < SS_SUCCESS)
		goto out;

	if (forced)
		drbd_warn(device, "Forced to consider local data as UpToDate!\n");

	/* Wait until nothing is on the fly :) */
	wait_event(device->misc_wait, atomic_read(&device->ap_pending_cnt) == 0);

	/* FIXME also wait for all pending P_BARRIER_ACK? */

	if (new_role == R_SECONDARY) {
		if (get_ldev(device)) {
			device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;
			put_ldev(device);
		}
	} else {
		mutex_lock(&device->resource->conf_update);
		nc = connection->net_conf;
		if (nc)
			nc->discard_my_data = 0; /* without copy; single bit op is atomic */
		mutex_unlock(&device->resource->conf_update);

		if (get_ldev(device)) {
			if (((device->state.conn < C_CONNECTED ||
			       device->state.pdsk <= D_FAILED)
			      && device->ldev->md.uuid[UI_BITMAP] == 0) || forced)
				drbd_uuid_new_current(device);

			device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
			put_ldev(device);
		}
	}

	/* writeout of activity log covered areas of the bitmap
	 * to stable storage done in after state change already */

	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		/* if this was forced, we should consider sync */
		if (forced)
			drbd_send_uuids(peer_device);
		drbd_send_current_state(peer_device);
	}

	drbd_md_sync(device);
	set_disk_ro(device->vdisk, new_role == R_SECONDARY);
	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
out:
	mutex_unlock(device->state_mutex);
	return rv;
}
static const char *from_attrs_err_to_txt(int err)
{
	return	err == -ENOMSG ? "required attribute missing" :
		err == -EOPNOTSUPP ? "unknown mandatory attribute" :
		err == -EEXIST ? "can not change invariant setting" :
		"invalid attribute value";
}
int drbd_adm_set_role(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct set_role_parms parms;
	int err;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_SET_ROLE_PARMS]) {
		err = set_role_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	mutex_lock(&adm_ctx.resource->adm_mutex);

	if (info->genlhdr->cmd == DRBD_ADM_PRIMARY)
		retcode = drbd_set_role(adm_ctx.device, R_PRIMARY, parms.assume_uptodate);
	else
		retcode = drbd_set_role(adm_ctx.device, R_SECONDARY, 0);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
/* Initializes the md.*_offset members, so we are able to find
 * the on disk meta data.
 *
 * We currently have two possible layouts:
 * external:
 *   |----------- md_size_sect ------------------|
 *   [ 4k superblock ][ activity log ][  Bitmap  ]
 *   | bm_offset = al_offset + X      |
 *  ==> bitmap sectors = md_size_sect - bm_offset
 *
 * internal:
 *            |----------- md_size_sect ------------------|
 * [data.....][  Bitmap  ][ activity log ][ 4k superblock ]
 *            | bm_offset = al_offset - Y |
 *  ==> bitmap sectors = Y = al_offset - bm_offset
 *
 *  Activity log size used to be fixed 32kB,
 *  but is about to become configurable.
 */
static void drbd_md_set_sector_offsets(struct drbd_device *device,
				       struct drbd_backing_dev *bdev)
{
	sector_t md_size_sect = 0;
	unsigned int al_size_sect = bdev->md.al_size_4k * 8;

	bdev->md.md_offset = drbd_md_ss(bdev);

	switch (bdev->md.meta_dev_idx) {
	default:
		/* v07 style fixed size indexed meta data */
		bdev->md.md_size_sect = MD_128MB_SECT;
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_FLEX_EXT:
		/* just occupy the full device; unit: sectors */
		bdev->md.md_size_sect = drbd_get_capacity(bdev->md_bdev);
		bdev->md.al_offset = MD_4kB_SECT;
		bdev->md.bm_offset = MD_4kB_SECT + al_size_sect;
		break;
	case DRBD_MD_INDEX_INTERNAL:
	case DRBD_MD_INDEX_FLEX_INT:
		/* al size is still fixed */
		bdev->md.al_offset = -al_size_sect;
		/* we need (slightly less than) ~ this much bitmap sectors: */
		md_size_sect = drbd_get_capacity(bdev->backing_bdev);
		md_size_sect = ALIGN(md_size_sect, BM_SECT_PER_EXT);
		md_size_sect = BM_SECT_TO_EXT(md_size_sect);
		md_size_sect = ALIGN(md_size_sect, 8);

		/* plus the "drbd meta data super block",
		 * and the activity log; */
		md_size_sect += MD_4kB_SECT + al_size_sect;

		bdev->md.md_size_sect = md_size_sect;
		/* bitmap offset is adjusted by 'super' block size */
		bdev->md.bm_offset = -md_size_sect + MD_4kB_SECT;
		break;
	}
}
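/* Rough worked example for internal meta data (illustrative numbers only,
 * assuming the default 32 kB activity log, i.e. al_size_4k == 8, and DRBD's
 * one-bit-per-4-KiB bitmap granularity): a 1 TiB backing device has 2^31
 * sectors; one bitmap sector (4096 bits) covers 16 MiB of data, so about
 * 65536 bitmap sectors are needed.  Adding MD_4kB_SECT (8) for the
 * superblock and al_size_sect (64) for the AL gives md_size_sect of roughly
 * 65608 sectors, i.e. ~32 MiB reserved at the end of the device, with
 * bm_offset = -md_size_sect + MD_4kB_SECT. */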
/* input size is expected to be in KB */
char *ppsize(char *buf, unsigned long long size)
{
	/* Needs 9 bytes at max including trailing NUL:
	 * -1ULL ==> "16384 EB" */
	static char units[] = { 'K', 'M', 'G', 'T', 'P', 'E' };
	int base = 0;
	while (size >= 10000 && base < sizeof(units)-1) {
		/* shift + round */
		size = (size >> 10) + !!(size & (1<<9));
		base++;
	}
	sprintf(buf, "%u %cB", (unsigned)size, units[base]);

	return buf;
}
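/* Example: ppsize(buf, 1048576) yields "1024 MB" -- 1048576 KB is shifted
 * down once (round-to-nearest via the (1<<9) test) to 1024 and the unit
 * advances from 'K' to 'M'. */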
/* there is still a theoretical deadlock when called from receiver
 * on an D_INCONSISTENT R_PRIMARY:
 *  remote READ does inc_ap_bio, receiver would need to receive answer
 *  packet from remote to dec_ap_bio again.
 *  receiver receive_sizes(), comes here,
 *  waits for ap_bio_cnt == 0. -> deadlock.
 * but this cannot happen, actually, because:
 *  R_PRIMARY D_INCONSISTENT, and peer's disk is unreachable
 *  (not connected, or bad/no disk on peer):
 *  see drbd_fail_request_early, ap_bio_cnt is zero.
 *  R_PRIMARY D_INCONSISTENT, and C_SYNC_TARGET:
 *  peer may not initiate a resize.
 */
/* Note these are not to be confused with
 * drbd_adm_suspend_io/drbd_adm_resume_io,
 * which are (sub) state changes triggered by admin (drbdsetup),
 * and can be long lived.
 * This changes an device->flag, is triggered by drbd internals,
 * and should be short-lived. */
/* It needs to be a counter, since multiple threads might
   independently suspend and resume IO. */
void drbd_suspend_io(struct drbd_device *device)
{
	atomic_inc(&device->suspend_cnt);
	if (drbd_suspended(device))
		return;
	wait_event(device->misc_wait, !atomic_read(&device->ap_bio_cnt));
}
void drbd_resume_io(struct drbd_device *device)
{
	if (atomic_dec_and_test(&device->suspend_cnt))
		wake_up(&device->misc_wait);
}
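/* drbd_suspend_io()/drbd_resume_io() are used as a matched pair around
 * operations that must not race with application IO, e.g. in
 * drbd_determine_dev_size() below:
 *
 *	drbd_suspend_io(device);
 *	buffer = drbd_md_get_buffer(device, __func__);
 *	...
 *	drbd_md_put_buffer(device);
 *	drbd_resume_io(device);
 */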
/**
 * drbd_determine_dev_size() -  Sets the right device size obeying all constraints
 * @device:	DRBD device.
 *
 * Returns 0 on success, negative return values indicate errors.
 * You should call drbd_md_sync() after calling this function.
 */
enum determine_dev_size
drbd_determine_dev_size(struct drbd_device *device, enum dds_flags flags, struct resize_parms *rs) __must_hold(local)
{
	struct md_offsets_and_sizes {
		u64 last_agreed_sect;
		u64 md_offset;
		s32 al_offset;
		s32 bm_offset;
		u32 md_size_sect;

		u32 al_stripes;
		u32 al_stripe_size_4k;
	} prev;
	sector_t u_size, size;
	struct drbd_md *md = &device->ldev->md;
	char ppb[10];
	void *buffer;

	int md_moved, la_size_changed;
	enum determine_dev_size rv = DS_UNCHANGED;

	/* We may change the on-disk offsets of our meta data below. Lock out
	 * anything that may cause meta data IO, to avoid acting on incomplete
	 * layout changes or scribbling over meta data that is in the process
	 * of being moved.
	 *
	 * Move is not exactly correct, btw, currently we have all our meta
	 * data in core memory, to "move" it we just write it all out, there
	 * are no reads. */
	drbd_suspend_io(device);
	buffer = drbd_md_get_buffer(device, __func__); /* Lock meta-data IO */
	if (!buffer) {
		drbd_resume_io(device);
		return DS_ERROR;
	}

	/* remember current offset and sizes */
	prev.last_agreed_sect = md->la_size_sect;
	prev.md_offset = md->md_offset;
	prev.al_offset = md->al_offset;
	prev.bm_offset = md->bm_offset;
	prev.md_size_sect = md->md_size_sect;
	prev.al_stripes = md->al_stripes;
	prev.al_stripe_size_4k = md->al_stripe_size_4k;

	if (rs) {
		/* rs is non NULL if we should change the AL layout only */
		md->al_stripes = rs->al_stripes;
		md->al_stripe_size_4k = rs->al_stripe_size / 4;
		md->al_size_4k = (u64)rs->al_stripes * rs->al_stripe_size / 4;
	}

	drbd_md_set_sector_offsets(device, device->ldev);

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	size = drbd_new_dev_size(device, device->ldev, u_size, flags & DDSF_FORCED);

	if (size < prev.last_agreed_sect) {
		if (rs && u_size == 0) {
			/* Remove "rs &&" later. This check should always be active, but
			   right now the receiver expects the permissive behavior */
			drbd_warn(device, "Implicit shrink not allowed. "
				 "Use --size=%llus for explicit shrink.\n",
				 (unsigned long long)size);
			rv = DS_ERROR_SHRINK;
		}
		if (u_size > size)
			rv = DS_ERROR_SPACE_MD;
		if (rv != DS_UNCHANGED)
			goto err_out;
	}

	if (drbd_get_capacity(device->this_bdev) != size ||
	    drbd_bm_capacity(device) != size) {
		int err;
		err = drbd_bm_resize(device, size, !(flags & DDSF_NO_RESYNC));
		if (unlikely(err)) {
			/* currently there is only one error: ENOMEM! */
			size = drbd_bm_capacity(device);
			if (size == 0) {
				drbd_err(device, "OUT OF MEMORY! "
				    "Could not allocate bitmap!\n");
			} else {
				drbd_err(device, "BM resizing failed. "
				    "Leaving size unchanged\n");
			}
			rv = DS_ERROR;
		}
		/* racy, see comments above. */
		drbd_set_my_capacity(device, size);
		md->la_size_sect = size;
		drbd_info(device, "size = %s (%llu KB)\n", ppsize(ppb, size>>1),
		     (unsigned long long)size>>1);
	}
	if (rv <= DS_ERROR)
		goto err_out;

	la_size_changed = (prev.last_agreed_sect != md->la_size_sect);

	md_moved = prev.md_offset    != md->md_offset
		|| prev.md_size_sect != md->md_size_sect;

	if (la_size_changed || md_moved || rs) {
		u32 prev_flags;

		/* We do some synchronous IO below, which may take some time.
		 * Clear the timer, to avoid scary "timer expired!" messages,
		 * "Superblock" is written out at least twice below, anyways. */
		del_timer(&device->md_sync_timer);

		/* We won't change the "al-extents" setting, we just may need
		 * to move the on-disk location of the activity log ringbuffer.
		 * Lock for transaction is good enough, it may well be "dirty"
		 * or even "starving". */
		wait_event(device->al_wait, lc_try_lock_for_transaction(device->act_log));

		/* mark current on-disk bitmap and activity log as unreliable */
		prev_flags = md->flags;
		md->flags |= MDF_FULL_SYNC | MDF_AL_DISABLED;
		drbd_md_write(device, buffer);

		drbd_al_initialize(device, buffer);

		drbd_info(device, "Writing the whole bitmap, %s\n",
			 la_size_changed && md_moved ? "size changed and md moved" :
			 la_size_changed ? "size changed" : "md moved");
		/* next line implicitly does drbd_suspend_io()+drbd_resume_io() */
		drbd_bitmap_io(device, md_moved ? &drbd_bm_write_all : &drbd_bm_write,
			       "size changed", BM_LOCKED_MASK);

		/* on-disk bitmap and activity log is authoritative again
		 * (unless there was an IO error meanwhile...) */
		md->flags = prev_flags;
		drbd_md_write(device, buffer);

		if (rs)
			drbd_info(device, "Changed AL layout to al-stripes = %d, al-stripe-size-kB = %d\n",
				  md->al_stripes, md->al_stripe_size_4k * 4);
	}

	if (size > prev.last_agreed_sect)
		rv = prev.last_agreed_sect ? DS_GREW : DS_GREW_FROM_ZERO;
	if (size < prev.last_agreed_sect)
		rv = DS_SHRUNK;

	if (0) {
	err_out:
		/* restore previous offset and sizes */
		md->la_size_sect = prev.last_agreed_sect;
		md->md_offset = prev.md_offset;
		md->al_offset = prev.al_offset;
		md->bm_offset = prev.bm_offset;
		md->md_size_sect = prev.md_size_sect;
		md->al_stripes = prev.al_stripes;
		md->al_stripe_size_4k = prev.al_stripe_size_4k;
		md->al_size_4k = (u64)prev.al_stripes * prev.al_stripe_size_4k;
	}
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_md_put_buffer(device);
	drbd_resume_io(device);

	return rv;
}
sector_t
drbd_new_dev_size(struct drbd_device *device, struct drbd_backing_dev *bdev,
		  sector_t u_size, int assume_peer_has_space)
{
	sector_t p_size = device->p_size;   /* partner's disk size. */
	sector_t la_size_sect = bdev->md.la_size_sect; /* last agreed size. */
	sector_t m_size; /* my size */
	sector_t size = 0;

	m_size = drbd_get_max_capacity(bdev);

	if (device->state.conn < C_CONNECTED && assume_peer_has_space) {
		drbd_warn(device, "Resize while not connected was forced by the user!\n");
		p_size = m_size;
	}

	if (p_size && m_size) {
		size = min_t(sector_t, p_size, m_size);
	} else {
		if (la_size_sect) {
			size = la_size_sect;
			if (m_size && m_size < size)
				size = m_size;
			if (p_size && p_size < size)
				size = p_size;
		} else {
			if (m_size)
				size = m_size;
			if (p_size)
				size = p_size;
		}
	}

	if (size == 0)
		drbd_err(device, "Both nodes diskless!\n");

	if (u_size) {
		if (u_size > size)
			drbd_err(device, "Requested disk size is too big (%lu > %lu)\n",
			    (unsigned long)u_size>>1, (unsigned long)size>>1);
		else
			size = u_size;
	}

	return size;
}
/**
 * drbd_check_al_size() - Ensures that the AL is of the right size
 * @device:	DRBD device.
 *
 * Returns -EBUSY if current al lru is still used, -ENOMEM when allocation
 * failed, and 0 on success. You should call drbd_md_sync() after you called
 * this function.
 */
static int drbd_check_al_size(struct drbd_device *device, struct disk_conf *dc)
{
	struct lru_cache *n, *t;
	struct lc_element *e;
	unsigned int in_use;
	int i;

	if (device->act_log &&
	    device->act_log->nr_elements == dc->al_extents)
		return 0;

	in_use = 0;
	t = device->act_log;
	n = lc_create("act_log", drbd_al_ext_cache, AL_UPDATES_PER_TRANSACTION,
		dc->al_extents, sizeof(struct lc_element), 0);

	if (n == NULL) {
		drbd_err(device, "Cannot allocate act_log lru!\n");
		return -ENOMEM;
	}
	spin_lock_irq(&device->al_lock);
	if (t) {
		for (i = 0; i < t->nr_elements; i++) {
			e = lc_element_by_index(t, i);
			if (e->refcnt)
				drbd_err(device, "refcnt(%d)==%d\n",
				    e->lc_number, e->refcnt);
			in_use += e->refcnt;
		}
	}
	if (!in_use)
		device->act_log = n;
	spin_unlock_irq(&device->al_lock);
	if (in_use) {
		drbd_err(device, "Activity log still in use!\n");
		lc_destroy(n);
		return -EBUSY;
	} else {
		lc_destroy(t);
	}
	drbd_md_mark_dirty(device); /* we changed device->act_log->nr_elemens */
	return 0;
}
static void drbd_setup_queue_param(struct drbd_device *device, struct drbd_backing_dev *bdev,
				   unsigned int max_bio_size)
{
	struct request_queue * const q = device->rq_queue;
	unsigned int max_hw_sectors = max_bio_size >> 9;
	unsigned int max_segments = 0;
	struct request_queue *b = NULL;

	if (bdev) {
		b = bdev->backing_bdev->bd_disk->queue;

		max_hw_sectors = min(queue_max_hw_sectors(b), max_bio_size >> 9);
		rcu_read_lock();
		max_segments = rcu_dereference(device->ldev->disk_conf)->max_bio_bvecs;
		rcu_read_unlock();

		blk_set_stacking_limits(&q->limits);
		blk_queue_max_write_same_sectors(q, 0);
	}

	blk_queue_logical_block_size(q, 512);
	blk_queue_max_hw_sectors(q, max_hw_sectors);
	/* This is the workaround for "bio would need to, but cannot, be split" */
	blk_queue_max_segments(q, max_segments ? max_segments : BLK_MAX_SEGMENTS);
	blk_queue_segment_boundary(q, PAGE_SIZE-1);

	if (b) {
		struct drbd_connection *connection = first_peer_device(device)->connection;

		blk_queue_max_discard_sectors(q, DRBD_MAX_DISCARD_SECTORS);

		if (blk_queue_discard(b) &&
		    (connection->cstate < C_CONNECTED || connection->agreed_features & FF_TRIM)) {
			/* We don't care, stacking below should fix it for the local device.
			 * Whether or not it is a suitable granularity on the remote device
			 * is not our problem, really. If you care, you need to
			 * use devices with similar topology on all peers. */
			q->limits.discard_granularity = 512;
			queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
		} else {
			blk_queue_max_discard_sectors(q, 0);
			queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, q);
			q->limits.discard_granularity = 0;
		}

		blk_queue_stack_limits(q, b);

		if (q->backing_dev_info.ra_pages != b->backing_dev_info.ra_pages) {
			drbd_info(device, "Adjusting my ra_pages to backing device's (%lu -> %lu)\n",
				 q->backing_dev_info.ra_pages,
				 b->backing_dev_info.ra_pages);
			q->backing_dev_info.ra_pages = b->backing_dev_info.ra_pages;
		}
	}
	/* To avoid confusion, if this queue does not support discard, clear
	 * max_discard_sectors, which is what lsblk -D reports to the user.  */
	if (!blk_queue_discard(q)) {
		blk_queue_max_discard_sectors(q, 0);
		q->limits.discard_granularity = 0;
	}
}
void drbd_reconsider_max_bio_size(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	unsigned int now, new, local, peer;

	now = queue_max_hw_sectors(device->rq_queue) << 9;
	local = device->local_max_bio_size; /* Eventually last known value, from volatile memory */
	peer = device->peer_max_bio_size; /* Eventually last known value, from meta data */

	if (bdev) {
		local = queue_max_hw_sectors(bdev->backing_bdev->bd_disk->queue) << 9;
		device->local_max_bio_size = local;
	}
	local = min(local, DRBD_MAX_BIO_SIZE);

	/* We may ignore peer limits if the peer is modern enough.
	   Because new from 8.3.8 onwards the peer can use multiple
	   BIOs for a single peer_request */
	if (device->state.conn >= C_WF_REPORT_PARAMS) {
		if (first_peer_device(device)->connection->agreed_pro_version < 94)
			peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
		else if (first_peer_device(device)->connection->agreed_pro_version == 94)
			peer = DRBD_MAX_SIZE_H80_PACKET;
		else if (first_peer_device(device)->connection->agreed_pro_version < 100)
			peer = DRBD_MAX_BIO_SIZE_P95;  /* drbd 8.3.8 onwards, before 8.4.0 */
		else
			peer = DRBD_MAX_BIO_SIZE;

		/* We may later detach and re-attach on a disconnected Primary.
		 * Avoid this setting to jump back in that case.
		 * We want to store what we know the peer DRBD can handle,
		 * not what the peer IO backend can handle. */
		if (peer > device->peer_max_bio_size)
			device->peer_max_bio_size = peer;
	}
	new = min(local, peer);

	if (device->state.role == R_PRIMARY && new < now)
		drbd_err(device, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

	if (new != now)
		drbd_info(device, "max BIO size = %u\n", new);

	drbd_setup_queue_param(device, bdev, new);
}
/* Starts the worker thread */
static void conn_reconfig_start(struct drbd_connection *connection)
{
	drbd_thread_start(&connection->worker);
	drbd_flush_workqueue(&connection->sender_work);
}
/* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_connection *connection)
{
	bool stop_threads;

	spin_lock_irq(&connection->resource->req_lock);
	stop_threads = conn_all_vols_unconf(connection) &&
		connection->cstate == C_STANDALONE;
	spin_unlock_irq(&connection->resource->req_lock);
	if (stop_threads) {
		/* ack_receiver thread and ack_sender workqueue are implicitly
		 * stopped by receiver in conn_disconnect() */
		drbd_thread_stop(&connection->receiver);
		drbd_thread_stop(&connection->worker);
	}
}
/* Make sure IO is suspended before calling this function(). */
static void drbd_suspend_al(struct drbd_device *device)
{
	int s = 0;

	if (!lc_try_lock(device->act_log)) {
		drbd_warn(device, "Failed to lock al in drbd_suspend_al()\n");
		return;
	}

	drbd_al_shrink(device);
	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED)
		s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
	spin_unlock_irq(&device->resource->req_lock);
	lc_unlock(device->act_log);

	if (s)
		drbd_info(device, "Suspended AL updates\n");
}
static bool should_set_defaults(struct genl_info *info)
{
	unsigned flags = ((struct drbd_genlmsghdr*)info->userhdr)->flags;
	return 0 != (flags & DRBD_GENL_F_SET_DEFAULTS);
}
static unsigned int drbd_al_extents_max(struct drbd_backing_dev *bdev)
{
	/* This is limited by 16 bit "slot" numbers,
	 * and by available on-disk context storage.
	 *
	 * Also (u16)~0 is special (denotes a "free" extent).
	 *
	 * One transaction occupies one 4kB on-disk block,
	 * we have n such blocks in the on disk ring buffer,
	 * the "current" transaction may fail (n-1),
	 * and there is 919 slot numbers context information per transaction.
	 *
	 * 72 transaction blocks amounts to more than 2**16 context slots,
	 * so cap there first.
	 */
	const unsigned int max_al_nr = DRBD_AL_EXTENTS_MAX;
	const unsigned int sufficient_on_disk =
		(max_al_nr + AL_CONTEXT_PER_TRANSACTION -1)
		/AL_CONTEXT_PER_TRANSACTION;

	unsigned int al_size_4k = bdev->md.al_size_4k;

	if (al_size_4k > sufficient_on_disk)
		return max_al_nr;

	return (al_size_4k - 1) * AL_CONTEXT_PER_TRANSACTION;
}
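/* Worked example of the cap above (using the 919 context slots per
 * transaction mentioned in the comment; the exact value of
 * DRBD_AL_EXTENTS_MAX comes from drbd_limits.h): with a 16 bit slot space
 * of roughly 65534 extents, (65534 + 919 - 1) / 919 == 72 transaction
 * blocks already suffice, so any larger on-disk AL ring buffer is capped
 * at max_al_nr instead of (al_size_4k - 1) * 919. */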
static bool write_ordering_changed(struct disk_conf *a, struct disk_conf *b)
{
	return	a->disk_barrier != b->disk_barrier ||
		a->disk_flushes != b->disk_flushes ||
		a->disk_drain != b->disk_drain;
}
int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct drbd_device *device;
	struct disk_conf *new_disk_conf, *old_disk_conf;
	struct fifo_buffer *old_plan = NULL, *new_plan = NULL;
	int err, fifo_size;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* we also need a disk
	 * to change the options on */
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	mutex_lock(&device->resource->conf_update);
	old_disk_conf = device->ldev->disk_conf;
	*new_disk_conf = *old_disk_conf;
	if (should_set_defaults(info))
		set_disk_conf_defaults(new_disk_conf);

	err = disk_conf_from_attrs_for_change(new_disk_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail_unlock;
	}

	if (!expect(new_disk_conf->resync_rate >= 1))
		new_disk_conf->resync_rate = 1;

	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (new_disk_conf->al_extents > drbd_al_extents_max(device->ldev))
		new_disk_conf->al_extents = drbd_al_extents_max(device->ldev);

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	fifo_size = (new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ;
	if (fifo_size != device->rs_plan_s->size) {
		new_plan = fifo_alloc(fifo_size);
		if (!new_plan) {
			drbd_err(device, "kmalloc of fifo_buffer failed");
			retcode = ERR_NOMEM;
			goto fail_unlock;
		}
	}

	drbd_suspend_io(device);
	wait_event(device->al_wait, lc_try_lock(device->act_log));
	drbd_al_shrink(device);
	err = drbd_check_al_size(device, new_disk_conf);
	lc_unlock(device->act_log);
	wake_up(&device->al_wait);
	drbd_resume_io(device);

	if (err) {
		retcode = ERR_NOMEM;
		goto fail_unlock;
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode == NO_ERROR) {
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		drbd_resync_after_changed(device);
	}
	unlock_all_resources();

	if (retcode != NO_ERROR)
		goto fail_unlock;

	if (new_plan) {
		old_plan = device->rs_plan_s;
		rcu_assign_pointer(device->rs_plan_s, new_plan);
	}

	mutex_unlock(&device->resource->conf_update);

	if (new_disk_conf->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;

	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	if (write_ordering_changed(old_disk_conf, new_disk_conf))
		drbd_bump_write_ordering(device->resource, NULL, WO_BDEV_FLUSH);

	drbd_md_sync(device);

	if (device->state.conn >= C_CONNECTED) {
		struct drbd_peer_device *peer_device;

		for_each_peer_device(peer_device, device)
			drbd_send_sync_param(peer_device);
	}

	synchronize_rcu();
	kfree(old_disk_conf);
	kfree(old_plan);
	mod_timer(&device->request_timer, jiffies + HZ);
	goto success;

fail_unlock:
	mutex_unlock(&device->resource->conf_update);
 fail:
	kfree(new_disk_conf);
	kfree(new_plan);
success:
	put_ldev(device);
 out:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static struct block_device *open_backing_dev(struct drbd_device *device,
		const char *bdev_path, void *claim_ptr, bool do_bd_link)
{
	struct block_device *bdev;
	int err = 0;

	bdev = blkdev_get_by_path(bdev_path,
				  FMODE_READ | FMODE_WRITE | FMODE_EXCL, claim_ptr);
	if (IS_ERR(bdev)) {
		drbd_err(device, "open(\"%s\") failed with %ld\n",
				bdev_path, PTR_ERR(bdev));
		return bdev;
	}

	if (!do_bd_link)
		return bdev;

	err = bd_link_disk_holder(bdev, device->vdisk);
	if (err) {
		blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
		drbd_err(device, "bd_link_disk_holder(\"%s\", ...) failed with %d\n",
				bdev_path, err);
		bdev = ERR_PTR(err);
	}
	return bdev;
}
static int open_backing_devices(struct drbd_device *device,
		struct disk_conf *new_disk_conf,
		struct drbd_backing_dev *nbc)
{
	struct block_device *bdev;

	bdev = open_backing_dev(device, new_disk_conf->backing_dev, device, true);
	if (IS_ERR(bdev))
		return ERR_OPEN_DISK;
	nbc->backing_bdev = bdev;

	/*
	 * meta_dev_idx >= 0: external fixed size, possibly multiple
	 * drbd sharing one meta device.  TODO in that case, paranoia
	 * check that [md_bdev, meta_dev_idx] is not yet used by some
	 * other drbd minor!  (if you use drbd.conf + drbdadm, that
	 * should check it for you already; but if you don't, or
	 * someone fooled it, we need to double check here)
	 */
	bdev = open_backing_dev(device, new_disk_conf->meta_dev,
		/* claim ptr: device, if claimed exclusively; shared drbd_m_holder,
		 * if potentially shared with other drbd minors */
			(new_disk_conf->meta_dev_idx < 0) ? (void*)device : (void*)drbd_m_holder,
		/* avoid double bd_claim_by_disk() for the same (source,target) tuple,
		 * as would happen with internal metadata. */
			(new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_FLEX_INT &&
			 new_disk_conf->meta_dev_idx != DRBD_MD_INDEX_INTERNAL));
	if (IS_ERR(bdev))
		return ERR_OPEN_MD_DISK;
	nbc->md_bdev = bdev;
	return NO_ERROR;
}
static void close_backing_dev(struct drbd_device *device, struct block_device *bdev,
	bool do_bd_unlink)
{
	if (!bdev)
		return;
	if (do_bd_unlink)
		bd_unlink_disk_holder(bdev, device->vdisk);
	blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL);
}
void drbd_backing_dev_free(struct drbd_device *device, struct drbd_backing_dev *ldev)
{
	if (ldev == NULL)
		return;

	close_backing_dev(device, ldev->md_bdev, ldev->md_bdev != ldev->backing_bdev);
	close_backing_dev(device, ldev->backing_bdev, true);

	kfree(ldev->disk_conf);
	kfree(ldev);
}
int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	struct drbd_peer_device *peer_device;
	struct drbd_connection *connection;
	int err;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	sector_t max_possible_sectors;
	sector_t min_md_device_sectors;
	struct drbd_backing_dev *nbc = NULL; /* new_backing_conf */
	struct disk_conf *new_disk_conf = NULL;
	struct lru_cache *resync_lru = NULL;
	struct fifo_buffer *new_plan = NULL;
	union drbd_state ns, os;
	enum drbd_state_rv rv;
	struct net_conf *nc;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	device = adm_ctx.device;
	mutex_lock(&adm_ctx.resource->adm_mutex);
	peer_device = first_peer_device(device);
	connection = peer_device->connection;
	conn_reconfig_start(connection);

	/* if you want to reconfigure, please tear down first */
	if (device->state.disk > D_DISKLESS) {
		retcode = ERR_DISK_CONFIGURED;
		goto fail;
	}
	/* It may just now have detached because of IO error.  Make sure
	 * drbd_ldev_destroy is done already, we may end up here very fast,
	 * e.g. if someone calls attach from the on-io-error handler,
	 * to realize a "hot spare" feature (not that I'd recommend that) */
	wait_event(device->misc_wait, !test_bit(GOING_DISKLESS, &device->flags));

	/* make sure there is no leftover from previous force-detach attempts */
	clear_bit(FORCE_DETACH, &device->flags);
	clear_bit(WAS_IO_ERROR, &device->flags);
	clear_bit(WAS_READ_ERROR, &device->flags);

	/* and no leftover from previously aborted resync or verify, either */
	device->rs_total = 0;
	device->rs_failed = 0;
	atomic_set(&device->rs_pending_cnt, 0);

	/* allocation not in the IO path, drbdsetup context */
	nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);
	if (!nbc) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	spin_lock_init(&nbc->md.uuid_lock);

	new_disk_conf = kzalloc(sizeof(struct disk_conf), GFP_KERNEL);
	if (!new_disk_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}
	nbc->disk_conf = new_disk_conf;

	set_disk_conf_defaults(new_disk_conf);
	err = disk_conf_from_attrs(new_disk_conf, info);
	if (err) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	if (new_disk_conf->c_plan_ahead > DRBD_C_PLAN_AHEAD_MAX)
		new_disk_conf->c_plan_ahead = DRBD_C_PLAN_AHEAD_MAX;

	new_plan = fifo_alloc((new_disk_conf->c_plan_ahead * 10 * SLEEP_TIME) / HZ);
	if (!new_plan) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < DRBD_MD_INDEX_FLEX_INT) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);
	if (nc) {
		if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
			rcu_read_unlock();
			retcode = ERR_STONITH_AND_PROT_A;
			goto fail;
		}
	}
	rcu_read_unlock();

	retcode = open_backing_devices(device, new_disk_conf, nbc);
	if (retcode != NO_ERROR)
		goto fail;

	if ((nbc->backing_bdev == nbc->md_bdev) !=
	    (new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_INTERNAL ||
	     new_disk_conf->meta_dev_idx == DRBD_MD_INDEX_FLEX_INT)) {
		retcode = ERR_MD_IDX_INVALID;
		goto fail;
	}

	resync_lru = lc_create("resync", drbd_bm_ext_cache,
			1, 61, sizeof(struct bm_extent),
			offsetof(struct bm_extent, lce));
	if (!resync_lru) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	/* Read our meta data super block early.
	 * This also sets other on-disk offsets. */
	retcode = drbd_md_read(device, nbc);
	if (retcode != NO_ERROR)
		goto fail;

	if (new_disk_conf->al_extents < DRBD_AL_EXTENTS_MIN)
		new_disk_conf->al_extents = DRBD_AL_EXTENTS_MIN;
	if (new_disk_conf->al_extents > drbd_al_extents_max(nbc))
		new_disk_conf->al_extents = drbd_al_extents_max(nbc);

	if (drbd_get_max_capacity(nbc) < new_disk_conf->disk_size) {
		drbd_err(device, "max capacity %llu smaller than disk size %llu\n",
			(unsigned long long) drbd_get_max_capacity(nbc),
			(unsigned long long) new_disk_conf->disk_size);
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	if (new_disk_conf->meta_dev_idx < 0) {
		max_possible_sectors = DRBD_MAX_SECTORS_FLEX;
		/* at least one MB, otherwise it does not make sense */
		min_md_device_sectors = (2<<10);
	} else {
		max_possible_sectors = DRBD_MAX_SECTORS;
		min_md_device_sectors = MD_128MB_SECT * (new_disk_conf->meta_dev_idx + 1);
	}

	if (drbd_get_capacity(nbc->md_bdev) < min_md_device_sectors) {
		retcode = ERR_MD_DISK_TOO_SMALL;
		drbd_warn(device, "refusing attach: md-device too small, "
		     "at least %llu sectors needed for this meta-disk type\n",
		     (unsigned long long) min_md_device_sectors);
		goto fail;
	}

	/* Make sure the new disk is big enough
	 * (we may currently be R_PRIMARY with no local disk...) */
	if (drbd_get_max_capacity(nbc) <
	    drbd_get_capacity(device->this_bdev)) {
		retcode = ERR_DISK_TOO_SMALL;
		goto fail;
	}

	nbc->known_size = drbd_get_capacity(nbc->backing_bdev);

	if (nbc->known_size > max_possible_sectors) {
		drbd_warn(device, "==> truncating very big lower level device "
			"to currently maximum possible %llu sectors <==\n",
			(unsigned long long) max_possible_sectors);
		if (new_disk_conf->meta_dev_idx >= 0)
			drbd_warn(device, "==>> using internal or flexible "
				      "meta data may help <<==\n");
	}

	drbd_suspend_io(device);
	/* also wait for the last barrier ack. */
	/* FIXME see also https://daiquiri.linbit/cgi-bin/bugzilla/show_bug.cgi?id=171
	 * We need a way to either ignore barrier acks for barriers sent before a device
	 * was attached, or a way to wait for all pending barrier acks to come in.
	 * As barriers are counted per resource,
	 * we'd need to suspend io on all devices of a resource.
	 */
	wait_event(device->misc_wait, !atomic_read(&device->ap_pending_cnt) || drbd_suspended(device));
	/* and for any other previously queued work */
	drbd_flush_workqueue(&connection->sender_work);

	rv = _drbd_request_state(device, NS(disk, D_ATTACHING), CS_VERBOSE);
	retcode = rv;  /* FIXME: Type mismatch. */
	drbd_resume_io(device);
	if (rv < SS_SUCCESS)
		goto fail;

	if (!get_ldev_if_state(device, D_ATTACHING))
		goto force_diskless;

	if (!device->bitmap) {
		if (drbd_bm_init(device)) {
			retcode = ERR_NOMEM;
			goto force_diskless_dec;
		}
	}

	if (device->state.conn < C_CONNECTED &&
	    device->state.role == R_PRIMARY && device->ed_uuid &&
	    (device->ed_uuid & ~((u64)1)) != (nbc->md.uuid[UI_CURRENT] & ~((u64)1))) {
		drbd_err(device, "Can only attach to data with current UUID=%016llX\n",
		    (unsigned long long)device->ed_uuid);
		retcode = ERR_DATA_NOT_CURRENT;
		goto force_diskless_dec;
	}

	/* Since we are diskless, fix the activity log first... */
	if (drbd_check_al_size(device, new_disk_conf)) {
		retcode = ERR_NOMEM;
		goto force_diskless_dec;
	}

	/* Prevent shrinking of consistent devices ! */
	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
	    drbd_new_dev_size(device, nbc, nbc->disk_conf->disk_size, 0) < nbc->md.la_size_sect) {
		drbd_warn(device, "refusing to truncate a consistent device\n");
		retcode = ERR_DISK_TOO_SMALL;
		goto force_diskless_dec;
	}

	lock_all_resources();
	retcode = drbd_resync_after_valid(device, new_disk_conf->resync_after);
	if (retcode != NO_ERROR) {
		unlock_all_resources();
		goto force_diskless_dec;
	}

	/* Reset the "barriers don't work" bits here, then force meta data to
	 * be written, to ensure we determine if barriers are supported. */
	if (new_disk_conf->md_flushes)
		clear_bit(MD_NO_FUA, &device->flags);
	else
		set_bit(MD_NO_FUA, &device->flags);

	/* Point of no return reached.
	 * Devices and memory are no longer released by error cleanup below.
	 * now device takes over responsibility, and the state engine should
	 * clean it up somewhere.  */
	D_ASSERT(device, device->ldev == NULL);
	device->ldev = nbc;
	device->resync = resync_lru;
	device->rs_plan_s = new_plan;
	nbc = NULL;
	resync_lru = NULL;
	new_disk_conf = NULL;
	new_plan = NULL;

	drbd_resync_after_changed(device);
	drbd_bump_write_ordering(device->resource, device->ldev, WO_BDEV_FLUSH);
	unlock_all_resources();

	if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
		set_bit(CRASHED_PRIMARY, &device->flags);
	else
		clear_bit(CRASHED_PRIMARY, &device->flags);

	if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
	    !(device->state.role == R_PRIMARY && device->resource->susp_nod))
		set_bit(CRASHED_PRIMARY, &device->flags);

	device->send_cnt = 0;
	device->recv_cnt = 0;
	device->read_cnt = 0;
	device->writ_cnt = 0;

	drbd_reconsider_max_bio_size(device, device->ldev);

	/* If I am currently not R_PRIMARY,
	 * but meta data primary indicator is set,
	 * I just now recover from a hard crash,
	 * and have been R_PRIMARY before that crash.
	 *
	 * Now, if I had no connection before that crash
	 * (have been degraded R_PRIMARY), chances are that
	 * I won't find my peer now either.
	 *
	 * In that case, and _only_ in that case,
	 * we use the degr-wfc-timeout instead of the default,
	 * so we can automatically recover from a crash of a
	 * degraded but active "cluster" after a certain timeout.
	 */
	clear_bit(USE_DEGR_WFC_T, &device->flags);
	if (device->state.role != R_PRIMARY &&
	     drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
	    !drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND))
		set_bit(USE_DEGR_WFC_T, &device->flags);

	dd = drbd_determine_dev_size(device, 0, NULL);
	if (dd <= DS_ERROR) {
		retcode = ERR_NOMEM_BITMAP;
		goto force_diskless_dec;
	} else if (dd == DS_GREW)
		set_bit(RESYNC_AFTER_NEG, &device->flags);

	if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ||
	    (test_bit(CRASHED_PRIMARY, &device->flags) &&
	     drbd_md_test_flag(device->ldev, MDF_AL_DISABLED))) {
		drbd_info(device, "Assuming that all blocks are out of sync "
		     "(aka FullSync)\n");
		if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
			"set_n_write from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	} else {
		if (drbd_bitmap_io(device, &drbd_bm_read,
			"read from attaching", BM_LOCKED_MASK)) {
			retcode = ERR_IO_MD_DISK;
			goto force_diskless_dec;
		}
	}

	if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
		drbd_suspend_al(device); /* IO is still suspended here... */

	spin_lock_irq(&device->resource->req_lock);
	os = drbd_read_state(device);
	ns = os;
	/* If MDF_CONSISTENT is not set go into inconsistent state,
	   otherwise investigate MDF_WasUpToDate...
	   If MDF_WAS_UP_TO_DATE is not set go into D_OUTDATED disk state,
	   otherwise into D_CONSISTENT state.
	*/
	if (drbd_md_test_flag(device->ldev, MDF_CONSISTENT)) {
		if (drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE))
			ns.disk = D_CONSISTENT;
		else
			ns.disk = D_OUTDATED;
	} else {
		ns.disk = D_INCONSISTENT;
	}

	if (drbd_md_test_flag(device->ldev, MDF_PEER_OUT_DATED))
		ns.pdsk = D_OUTDATED;

	rcu_read_lock();
	if (ns.disk == D_CONSISTENT &&
	    (ns.pdsk == D_OUTDATED || rcu_dereference(device->ldev->disk_conf)->fencing == FP_DONT_CARE))
		ns.disk = D_UP_TO_DATE;

	/* All tests on MDF_PRIMARY_IND, MDF_CONNECTED_IND,
	   MDF_CONSISTENT and MDF_WAS_UP_TO_DATE must happen before
	   this point, because drbd_request_state() modifies these
	   flags. */

	if (rcu_dereference(device->ldev->disk_conf)->al_updates)
		device->ldev->md.flags &= ~MDF_AL_DISABLED;
	else
		device->ldev->md.flags |= MDF_AL_DISABLED;

	rcu_read_unlock();

	/* In case we are C_CONNECTED postpone any decision on the new disk
	   state after the negotiation phase. */
	if (device->state.conn == C_CONNECTED) {
		device->new_state_tmp.i = ns.i;
		ns.i = os.i;
		ns.disk = D_NEGOTIATING;

		/* We expect to receive up-to-date UUIDs soon.
		   To avoid a race in receive_state, free p_uuid while
		   holding req_lock. I.e. atomic with the state change */
		kfree(device->p_uuid);
		device->p_uuid = NULL;
	}

	rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
	spin_unlock_irq(&device->resource->req_lock);

	if (rv < SS_SUCCESS)
		goto force_diskless_dec;

	mod_timer(&device->request_timer, jiffies + HZ);

	if (device->state.role == R_PRIMARY)
		device->ldev->md.uuid[UI_CURRENT] |=  (u64)1;
	else
		device->ldev->md.uuid[UI_CURRENT] &= ~(u64)1;

	drbd_md_mark_dirty(device);
	drbd_md_sync(device);

	kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
	put_ldev(device);
	conn_reconfig_done(connection);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
.resource
->adm_mutex
);
1962 drbd_adm_finish(&adm_ctx
, info
, retcode
);
1968 drbd_force_state(device
, NS(disk
, D_DISKLESS
));
1969 drbd_md_sync(device
);
1971 conn_reconfig_done(connection
);
1973 close_backing_dev(device
, nbc
->md_bdev
, nbc
->md_bdev
!= nbc
->backing_bdev
);
1974 close_backing_dev(device
, nbc
->backing_bdev
, true);
1977 kfree(new_disk_conf
);
1978 lc_destroy(resync_lru
);
1980 mutex_unlock(&adm_ctx
.resource
->adm_mutex
);
1982 drbd_adm_finish(&adm_ctx
, info
, retcode
);
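/* Detach helper: with force, the disk is pushed straight to D_FAILED via
 * drbd_force_state(); otherwise in-flight meta-data IO is drained first and
 * the D_FAILED transition is requested through the state engine. */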
static int adm_detach(struct drbd_device *device, int force)
{
	enum drbd_state_rv retcode;
	void *buffer;
	int ret;

	if (force) {
		set_bit(FORCE_DETACH, &device->flags);
		drbd_force_state(device, NS(disk, D_FAILED));
		retcode = SS_SUCCESS;
		goto out;
	}

	drbd_suspend_io(device); /* so no-one is stuck in drbd_al_begin_io */
	buffer = drbd_md_get_buffer(device, __func__); /* make sure there is no in-flight meta-data IO */
	if (buffer) {
		retcode = drbd_request_state(device, NS(disk, D_FAILED));
		drbd_md_put_buffer(device);
	} else /* already <= D_FAILED */
		retcode = SS_NOTHING_TO_DO;
	/* D_FAILED will transition to DISKLESS. */
	drbd_resume_io(device);
	ret = wait_event_interruptible(device->misc_wait,
			device->state.disk != D_FAILED);
	if ((int)retcode == (int)SS_IS_DISKLESS)
		retcode = SS_NOTHING_TO_DO;
	if (ret)
		retcode = ERR_INTR;
out:
	return retcode;
}
/* Detaching the disk is a process in multiple stages. First we need to lock
 * out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
 * Then we transition to D_DISKLESS, and wait for put_ldev() to return all
 * internal references as well.
 * Only then we have finally detached. */
int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct detach_parms parms = { };
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (info->attrs[DRBD_NLA_DETACH_PARMS]) {
		err = detach_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = adm_detach(adm_ctx.device, parms.force_detach);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
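/* Returns true if any volume of this connection is currently in a resync
 * (or paused resync) state. */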
static bool conn_resync_running(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.conn == C_SYNC_SOURCE ||
		    device->state.conn == C_SYNC_TARGET ||
		    device->state.conn == C_PAUSED_SYNC_S ||
		    device->state.conn == C_PAUSED_SYNC_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
static bool conn_ov_running(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	bool rv = false;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;
		if (device->state.conn == C_VERIFY_S ||
		    device->state.conn == C_VERIFY_T) {
			rv = true;
			break;
		}
	}
	rcu_read_unlock();

	return rv;
}
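/* Sanity checks on a new net_conf: protocol version constraints for changed
 * options, two-primaries vs. wire protocol, fencing policy vs. protocol A,
 * and the congestion policy. */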
static enum drbd_ret_code
_check_net_options(struct drbd_connection *connection, struct net_conf *old_net_conf, struct net_conf *new_net_conf)
{
	struct drbd_peer_device *peer_device;
	int i;

	if (old_net_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
		if (new_net_conf->wire_protocol != old_net_conf->wire_protocol)
			return ERR_NEED_APV_100;

		if (new_net_conf->two_primaries != old_net_conf->two_primaries)
			return ERR_NEED_APV_100;

		if (strcmp(new_net_conf->integrity_alg, old_net_conf->integrity_alg))
			return ERR_NEED_APV_100;
	}

	if (!new_net_conf->two_primaries &&
	    conn_highest_role(connection) == R_PRIMARY &&
	    conn_highest_peer(connection) == R_PRIMARY)
		return ERR_NEED_ALLOW_TWO_PRI;

	if (new_net_conf->two_primaries &&
	    (new_net_conf->wire_protocol != DRBD_PROT_C))
		return ERR_NOT_PROTO_C;

	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		if (get_ldev(device)) {
			enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
			put_ldev(device);
			if (new_net_conf->wire_protocol == DRBD_PROT_A && fp == FP_STONITH)
				return ERR_STONITH_AND_PROT_A;
		}
		if (device->state.role == R_PRIMARY && new_net_conf->discard_my_data)
			return ERR_DISCARD_IMPOSSIBLE;
	}

	if (new_net_conf->on_congestion != OC_BLOCK && new_net_conf->wire_protocol != DRBD_PROT_A)
		return ERR_CONG_NOT_PROTO_A;

	return NO_ERROR;
}
static enum drbd_ret_code
check_net_options(struct drbd_connection *connection, struct net_conf *new_net_conf)
{
	static enum drbd_ret_code rv;
	struct drbd_peer_device *peer_device;
	int i;

	rcu_read_lock();
	rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_net_conf);
	rcu_read_unlock();

	/* connection->peer_devices protected by genl_lock() here */
	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		if (!device->bitmap) {
			if (drbd_bm_init(device))
				return ERR_NOMEM;
		}
	}

	return rv;
}
struct crypto {
	struct crypto_ahash *verify_tfm;
	struct crypto_ahash *csums_tfm;
	struct crypto_shash *cram_hmac_tfm;
	struct crypto_ahash *integrity_tfm;
};

static int
alloc_shash(struct crypto_shash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_shash(tfm_name, 0, 0);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}

static int
alloc_ahash(struct crypto_ahash **tfm, char *tfm_name, int err_alg)
{
	if (!tfm_name[0])
		return NO_ERROR;

	*tfm = crypto_alloc_ahash(tfm_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(*tfm)) {
		*tfm = NULL;
		return err_alg;
	}

	return NO_ERROR;
}
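/* Allocate the checksum, verify, integrity and CRAM-HMAC transforms named in
 * the new net_conf; callers release whatever was allocated via free_crypto()
 * on their error paths. */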
static enum drbd_ret_code
alloc_crypto(struct crypto *crypto, struct net_conf *new_net_conf)
{
	char hmac_name[CRYPTO_MAX_ALG_NAME];
	enum drbd_ret_code rv;

	rv = alloc_ahash(&crypto->csums_tfm, new_net_conf->csums_alg,
			 ERR_CSUMS_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_ahash(&crypto->verify_tfm, new_net_conf->verify_alg,
			 ERR_VERIFY_ALG);
	if (rv != NO_ERROR)
		return rv;
	rv = alloc_ahash(&crypto->integrity_tfm, new_net_conf->integrity_alg,
			 ERR_INTEGRITY_ALG);
	if (rv != NO_ERROR)
		return rv;
	if (new_net_conf->cram_hmac_alg[0] != 0) {
		snprintf(hmac_name, CRYPTO_MAX_ALG_NAME, "hmac(%s)",
			 new_net_conf->cram_hmac_alg);

		rv = alloc_shash(&crypto->cram_hmac_tfm, hmac_name,
				 ERR_AUTH_ALG);
	}

	return rv;
}

static void free_crypto(struct crypto *crypto)
{
	crypto_free_shash(crypto->cram_hmac_tfm);
	crypto_free_ahash(crypto->integrity_tfm);
	crypto_free_ahash(crypto->csums_tfm);
	crypto_free_ahash(crypto->verify_tfm);
}
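/* Change the options of an established network configuration: algorithms in
 * use by a running resync or online verify may not be changed. */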
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct drbd_connection *connection;
	struct net_conf *old_net_conf, *new_net_conf = NULL;
	int err;
	int ovr; /* online verify running */
	int rsr; /* re-sync running */
	struct crypto crypto = { };

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	connection = adm_ctx.connection;
	mutex_lock(&adm_ctx.resource->adm_mutex);

	new_net_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
	if (!new_net_conf) {
		retcode = ERR_NOMEM;
		goto out;
	}

	conn_reconfig_start(connection);

	mutex_lock(&connection->data.mutex);
	mutex_lock(&connection->resource->conf_update);
	old_net_conf = connection->net_conf;

	if (!old_net_conf) {
		drbd_msg_put_info(adm_ctx.reply_skb, "net conf missing, try connect");
		retcode = ERR_INVALID_REQUEST;
		goto fail;
	}

	*new_net_conf = *old_net_conf;
	if (should_set_defaults(info))
		set_net_conf_defaults(new_net_conf);

	err = net_conf_from_attrs_for_change(new_net_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(connection, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	/* re-sync running */
	rsr = conn_resync_running(connection);
	if (rsr && strcmp(new_net_conf->csums_alg, old_net_conf->csums_alg)) {
		retcode = ERR_CSUMS_RESYNC_RUNNING;
		goto fail;
	}

	/* online verify running */
	ovr = conn_ov_running(connection);
	if (ovr && strcmp(new_net_conf->verify_alg, old_net_conf->verify_alg)) {
		retcode = ERR_VERIFY_RUNNING;
		goto fail;
	}

	retcode = alloc_crypto(&crypto, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	rcu_assign_pointer(connection->net_conf, new_net_conf);

	if (!rsr) {
		crypto_free_ahash(connection->csums_tfm);
		connection->csums_tfm = crypto.csums_tfm;
		crypto.csums_tfm = NULL;
	}
	if (!ovr) {
		crypto_free_ahash(connection->verify_tfm);
		connection->verify_tfm = crypto.verify_tfm;
		crypto.verify_tfm = NULL;
	}

	crypto_free_ahash(connection->integrity_tfm);
	connection->integrity_tfm = crypto.integrity_tfm;
	if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
		/* Do this without trying to take connection->data.mutex again. */
		__drbd_send_protocol(connection, P_PROTOCOL_UPDATE);

	crypto_free_shash(connection->cram_hmac_tfm);
	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;

	mutex_unlock(&connection->resource->conf_update);
	mutex_unlock(&connection->data.mutex);
	synchronize_rcu();
	kfree(old_net_conf);

	if (connection->cstate >= C_WF_REPORT_PARAMS) {
		struct drbd_peer_device *peer_device;
		int vnr;

		idr_for_each_entry(&connection->peer_devices, peer_device, vnr)
			drbd_send_sync_param(peer_device);
	}

	goto done;

 fail:
	mutex_unlock(&connection->resource->conf_update);
	mutex_unlock(&connection->data.mutex);
	free_crypto(&crypto);
	kfree(new_net_conf);
 done:
	conn_reconfig_done(connection);
 out:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static void connection_to_info(struct connection_info *info,
			       struct drbd_connection *connection)
{
	info->conn_connection_state = connection->cstate;
	info->conn_role = conn_highest_peer(connection);
}

static void peer_device_to_info(struct peer_device_info *info,
				struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;

	info->peer_repl_state =
		max_t(enum drbd_conns, C_WF_REPORT_PARAMS, device->state.conn);
	info->peer_disk_state = device->state.pdsk;
	info->peer_resync_susp_user = device->state.user_isp;
	info->peer_resync_susp_peer = device->state.peer_isp;
	info->peer_resync_susp_dependency = device->state.aftr_isp;
}
int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct connection_info connection_info;
	enum drbd_notification_type flags;
	unsigned int peer_devices = 0;
	struct drbd_config_context adm_ctx;
	struct drbd_peer_device *peer_device;
	struct net_conf *old_net_conf, *new_net_conf = NULL;
	struct crypto crypto = { };
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	enum drbd_ret_code retcode;
	int i;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);

	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;
	if (!(adm_ctx.my_addr && adm_ctx.peer_addr)) {
		drbd_msg_put_info(adm_ctx.reply_skb, "connection endpoint(s) missing");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* No need for _rcu here. All reconfiguration is
	 * strictly serialized on genl_lock(). We are protected against
	 * concurrent reconfiguration/addition/deletion */
	for_each_resource(resource, &drbd_resources) {
		for_each_connection(connection, resource) {
			if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
			    !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr,
				    connection->my_addr_len)) {
				retcode = ERR_LOCAL_ADDR;
				goto out;
			}

			if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
			    !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr,
				    connection->peer_addr_len)) {
				retcode = ERR_PEER_ADDR;
				goto out;
			}
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	connection = first_connection(adm_ctx.resource);
	conn_reconfig_start(connection);

	if (connection->cstate > C_STANDALONE) {
		retcode = ERR_NET_CONFIGURED;
		goto fail;
	}

	/* allocation not in the IO path, drbdsetup / netlink process context */
	new_net_conf = kzalloc(sizeof(*new_net_conf), GFP_KERNEL);
	if (!new_net_conf) {
		retcode = ERR_NOMEM;
		goto fail;
	}

	set_net_conf_defaults(new_net_conf);

	err = net_conf_from_attrs(new_net_conf, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	retcode = check_net_options(connection, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	retcode = alloc_crypto(&crypto, new_net_conf);
	if (retcode != NO_ERROR)
		goto fail;

	((char *)new_net_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;

	drbd_flush_workqueue(&connection->sender_work);

	mutex_lock(&adm_ctx.resource->conf_update);
	old_net_conf = connection->net_conf;
	if (old_net_conf) {
		retcode = ERR_NET_CONFIGURED;
		mutex_unlock(&adm_ctx.resource->conf_update);
		goto fail;
	}
	rcu_assign_pointer(connection->net_conf, new_net_conf);

	conn_free_crypto(connection);
	connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
	connection->integrity_tfm = crypto.integrity_tfm;
	connection->csums_tfm = crypto.csums_tfm;
	connection->verify_tfm = crypto.verify_tfm;

	connection->my_addr_len = nla_len(adm_ctx.my_addr);
	memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
	connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
	memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);

	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		peer_devices++;
	}

	connection_to_info(&connection_info, connection);
	flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
	mutex_lock(&notification_mutex);
	notify_connection_state(NULL, 0, connection, &connection_info, NOTIFY_CREATE | flags);
	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct peer_device_info peer_device_info;

		peer_device_to_info(&peer_device_info, peer_device);
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		notify_peer_device_state(NULL, 0, peer_device, &peer_device_info, NOTIFY_CREATE | flags);
	}
	mutex_unlock(&notification_mutex);
	mutex_unlock(&adm_ctx.resource->conf_update);

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, i) {
		struct drbd_device *device = peer_device->device;
		device->send_cnt = 0;
		device->recv_cnt = 0;
	}
	rcu_read_unlock();

	retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);

	conn_reconfig_done(connection);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;

fail:
	free_crypto(&crypto);
	kfree(new_net_conf);

	conn_reconfig_done(connection);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static enum drbd_state_rv
conn_try_disconnect(struct drbd_connection *connection, bool force)
{
	enum drbd_state_rv rv;

	rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
			force ? CS_HARD : 0);

	switch (rv) {
	case SS_NOTHING_TO_DO:
		break;
	case SS_ALREADY_STANDALONE:
		return SS_SUCCESS;
	case SS_PRIMARY_NOP:
		/* Our state checking code wants to see the peer outdated. */
		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);

		if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);

		break;
	case SS_CW_FAILED_BY_PEER:
		/* The peer probably wants to see us outdated. */
		rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
							disk, D_OUTDATED), 0);
		if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
			rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
					CS_HARD);
		}
		break;
	default:;
		/* no special handling necessary */
	}

	if (rv >= SS_SUCCESS) {
		enum drbd_state_rv rv2;
		/* No one else can reconfigure the network while I am here.
		 * The state handling only uses drbd_thread_stop_nowait(),
		 * we want to really wait here until the receiver is no more.
		 */
		drbd_thread_stop(&connection->receiver);

		/* Race breaker. This additional state change request may be
		 * necessary, if this was a forced disconnect during a receiver
		 * restart. We may have "killed" the receiver thread just
		 * after drbd_receiver() returned. Typically, we should be
		 * C_STANDALONE already, now, and this becomes a no-op.
		 */
		rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
				CS_VERBOSE | CS_HARD);
		if (rv2 < SS_SUCCESS)
			drbd_err(connection,
				"unexpected rv2=%d in conn_try_disconnect()\n",
				rv2);
		/* Unlike in DRBD 9, the state engine has generated
		 * NOTIFY_DESTROY events before clearing connection->net_conf. */
	}
	return rv;
}
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct disconnect_parms parms;
	struct drbd_connection *connection;
	enum drbd_state_rv rv;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_CONNECTION);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	connection = adm_ctx.connection;
	memset(&parms, 0, sizeof(parms));
	if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
		err = disconnect_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto fail;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	rv = conn_try_disconnect(connection, parms.force_disconnect);
	if (rv < SS_SUCCESS)
		retcode = rv;  /* FIXME: Type mismatch. */
	else
		retcode = NO_ERROR;
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 fail:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
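/* After an online grow, decide which side becomes sync source and kick off
 * the resync of the newly added storage area. */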
void resync_after_online_grow(struct drbd_device *device)
{
	int iass; /* I am sync source */

	drbd_info(device, "Resync of new storage after online grow\n");
	if (device->state.role != device->state.peer)
		iass = (device->state.role == R_PRIMARY);
	else
		iass = test_bit(RESOLVE_CONFLICTS, &first_peer_device(device)->connection->flags);

	if (iass)
		drbd_start_resync(device, C_SYNC_SOURCE);
	else
		_drbd_request_state(device, NS(conn, C_WF_SYNC_UUID), CS_VERBOSE + CS_SERIALIZE);
}
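/* Resize the DRBD device: may change the requested user size and/or the
 * activity log layout, then re-examines the resulting device size. */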
int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct disk_conf *old_disk_conf, *new_disk_conf = NULL;
	struct resize_parms rs;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	enum determine_dev_size dd;
	bool change_al_layout = false;
	enum dds_flags ddsf;
	sector_t u_size;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	device = adm_ctx.device;
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto fail;
	}

	memset(&rs, 0, sizeof(struct resize_parms));
	rs.al_stripes = device->ldev->md.al_stripes;
	rs.al_stripe_size = device->ldev->md.al_stripe_size_4k * 4;
	if (info->attrs[DRBD_NLA_RESIZE_PARMS]) {
		err = resize_parms_from_attrs(&rs, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto fail_ldev;
		}
	}

	if (device->state.conn > C_CONNECTED) {
		retcode = ERR_RESIZE_RESYNC;
		goto fail_ldev;
	}

	if (device->state.role == R_SECONDARY &&
	    device->state.peer == R_SECONDARY) {
		retcode = ERR_NO_PRIMARY;
		goto fail_ldev;
	}

	if (rs.no_resync && first_peer_device(device)->connection->agreed_pro_version < 93) {
		retcode = ERR_NEED_APV_93;
		goto fail_ldev;
	}

	rcu_read_lock();
	u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
	rcu_read_unlock();
	if (u_size != (sector_t)rs.resize_size) {
		new_disk_conf = kmalloc(sizeof(struct disk_conf), GFP_KERNEL);
		if (!new_disk_conf) {
			retcode = ERR_NOMEM;
			goto fail_ldev;
		}
	}

	if (device->ldev->md.al_stripes != rs.al_stripes ||
	    device->ldev->md.al_stripe_size_4k != rs.al_stripe_size / 4) {
		u32 al_size_k = rs.al_stripes * rs.al_stripe_size;

		if (al_size_k > (16 * 1024 * 1024)) {
			retcode = ERR_MD_LAYOUT_TOO_BIG;
			goto fail_ldev;
		}

		if (al_size_k < MD_32kB_SECT/2) {
			retcode = ERR_MD_LAYOUT_TOO_SMALL;
			goto fail_ldev;
		}

		if (device->state.conn != C_CONNECTED && !rs.resize_force) {
			retcode = ERR_MD_LAYOUT_CONNECTED;
			goto fail_ldev;
		}

		change_al_layout = true;
	}

	if (device->ldev->known_size != drbd_get_capacity(device->ldev->backing_bdev))
		device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);

	if (new_disk_conf) {
		mutex_lock(&device->resource->conf_update);
		old_disk_conf = device->ldev->disk_conf;
		*new_disk_conf = *old_disk_conf;
		new_disk_conf->disk_size = (sector_t)rs.resize_size;
		rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
		mutex_unlock(&device->resource->conf_update);
		synchronize_rcu();
		kfree(old_disk_conf);
		new_disk_conf = NULL;
	}

	ddsf = (rs.resize_force ? DDSF_FORCED : 0) | (rs.no_resync ? DDSF_NO_RESYNC : 0);
	dd = drbd_determine_dev_size(device, ddsf, change_al_layout ? &rs : NULL);
	drbd_md_sync(device);
	put_ldev(device);
	if (dd == DS_ERROR) {
		retcode = ERR_NOMEM_BITMAP;
		goto fail;
	} else if (dd == DS_ERROR_SPACE_MD) {
		retcode = ERR_MD_LAYOUT_NO_FIT;
		goto fail;
	} else if (dd == DS_ERROR_SHRINK) {
		retcode = ERR_IMPLICIT_SHRINK;
		goto fail;
	}

	if (device->state.conn == C_CONNECTED) {
		if (dd == DS_GREW)
			set_bit(RESIZE_PENDING, &device->flags);

		drbd_send_uuids(first_peer_device(device));
		drbd_send_sizes(first_peer_device(device), 1, ddsf);
	}

 fail:
	mutex_unlock(&adm_ctx.resource->adm_mutex);
 finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;

 fail_ldev:
	put_ldev(device);
	kfree(new_disk_conf);
	goto fail;
}
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto fail;

	res_opts = adm_ctx.resource->res_opts;
	if (should_set_defaults(info))
		set_res_opts_defaults(&res_opts);

	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto fail;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	err = set_resource_options(adm_ctx.resource, &res_opts);
	if (err) {
		retcode = ERR_INVALID_REQUEST;
		if (err == -ENOMEM)
			retcode = ERR_NOMEM;
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);

fail:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_invalidate(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx.device;
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for it's after_state_ch(). */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);

	/* If we happen to be C_STANDALONE R_SECONDARY, just change to
	 * D_INCONSISTENT, and set all bits in the bitmap. Otherwise,
	 * try to start a resync handshake as sync target for full sync.
	 */
	if (device->state.conn == C_STANDALONE && device->state.role == R_SECONDARY) {
		retcode = drbd_request_state(device, NS(disk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(device, &drbd_bmio_set_n_write,
				"set_n_write from invalidate", BM_LOCKED_MASK))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_T));
	drbd_resume_io(device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	put_ldev(device);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static int drbd_adm_simple_request_state(struct sk_buff *skb, struct genl_info *info,
		union drbd_state mask, union drbd_state val)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = drbd_request_state(adm_ctx.device, mask, val);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
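/* Bitmap IO worker used by invalidate-peer below: set all bits in the bitmap
 * and suspend the activity log. */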
static int drbd_bmio_set_susp_al(struct drbd_device *device) __must_hold(local)
{
	int rv;

	rv = drbd_bmio_set_n_write(device);
	drbd_suspend_al(device);
	return rv;
}
int drbd_adm_invalidate_peer(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	int retcode; /* drbd_ret_code, drbd_state_rv */
	struct drbd_device *device;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx.device;
	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* If there is still bitmap IO pending, probably because of a previous
	 * resync just being finished, wait for it before requesting a new resync.
	 * Also wait for it's after_state_ch(). */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	drbd_flush_workqueue(&first_peer_device(device)->connection->sender_work);

	/* If we happen to be C_STANDALONE R_PRIMARY, just set all bits
	 * in the bitmap. Otherwise, try to start a resync handshake
	 * as sync source for full sync.
	 */
	if (device->state.conn == C_STANDALONE && device->state.role == R_PRIMARY) {
		/* The peer will get a resync upon connect anyways. Just make that
		   into a full resync. */
		retcode = drbd_request_state(device, NS(pdsk, D_INCONSISTENT));
		if (retcode >= SS_SUCCESS) {
			if (drbd_bitmap_io(device, &drbd_bmio_set_susp_al,
				"set_n_write from invalidate_peer",
				BM_LOCKED_SET_ALLOWED))
				retcode = ERR_IO_MD_DISK;
		}
	} else
		retcode = drbd_request_state(device, NS(conn, C_STARTING_SYNC_S));
	drbd_resume_io(device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
	put_ldev(device);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_pause_sync(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	if (drbd_request_state(adm_ctx.device, NS(user_isp, 1)) == SS_NOTHING_TO_DO)
		retcode = ERR_PAUSE_IS_SET;
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_resume_sync(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	union drbd_dev_state s;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	if (drbd_request_state(adm_ctx.device, NS(user_isp, 0)) == SS_NOTHING_TO_DO) {
		s = adm_ctx.device->state;
		if (s.conn == C_PAUSED_SYNC_S || s.conn == C_PAUSED_SYNC_T) {
			retcode = s.aftr_isp ? ERR_PIC_AFTER_DEP :
				  s.peer_isp ? ERR_PIC_PEER_DEP : ERR_PAUSE_IS_CLEAR;
		} else {
			retcode = ERR_PAUSE_IS_CLEAR;
		}
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_suspend_io(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(susp, 1));
}
int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	device = adm_ctx.device;
	if (test_bit(NEW_CUR_UUID, &device->flags)) {
		if (get_ldev_if_state(device, D_ATTACHING)) {
			drbd_uuid_new_current(device);
			put_ldev(device);
		} else {
			/* This is effectively a multi-stage "forced down".
			 * The NEW_CUR_UUID bit is supposedly only set, if we
			 * lost the replication connection, and are configured
			 * to freeze IO and wait for some fence-peer handler.
			 * So we still don't have a replication connection.
			 * And now we don't have a local disk either. After
			 * resume, we will fail all pending and new IO, because
			 * we don't have any data anymore. Which means we will
			 * eventually be able to terminate all users of this
			 * device, and then take it down. By bumping the
			 * "effective" data uuid, we make sure that you really
			 * need to tear down before you reconfigure, we will
			 * the refuse to re-connect or re-attach (because no
			 * matching real data uuid exists).
			 */
			u64 val;
			get_random_bytes(&val, sizeof(u64));
			drbd_set_ed_uuid(device, val);
			drbd_warn(device, "Resumed without access to data; please tear down before attempting to re-configure.\n");
		}
		clear_bit(NEW_CUR_UUID, &device->flags);
	}
	drbd_suspend_io(device);
	retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
	if (retcode == SS_SUCCESS) {
		if (device->state.conn < C_CONNECTED)
			tl_clear(first_peer_device(device)->connection);
		if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
			tl_restart(first_peer_device(device)->connection, FAIL_FROZEN_DISK_IO);
	}
	drbd_resume_io(device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
{
	return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
}
static int nla_put_drbd_cfg_context(struct sk_buff *skb,
				    struct drbd_resource *resource,
				    struct drbd_connection *connection,
				    struct drbd_device *device)
{
	struct nlattr *nla;
	nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		goto nla_put_failure;
	if (device &&
	    nla_put_u32(skb, T_ctx_volume, device->vnr))
		goto nla_put_failure;
	if (nla_put_string(skb, T_ctx_resource_name, resource->name))
		goto nla_put_failure;
	if (connection) {
		if (connection->my_addr_len &&
		    nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
			goto nla_put_failure;
		if (connection->peer_addr_len &&
		    nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
			goto nla_put_failure;
	}
	nla_nest_end(skb, nla);
	return 0;

nla_put_failure:
	if (nla)
		nla_nest_cancel(skb, nla);
	return -EMSGSIZE;
}
/*
 * The generic netlink dump callbacks are called outside the genl_lock(), so
 * they cannot use the simple attribute parsing code which uses global
 * attribute tables.
 */
static struct nlattr *find_cfg_context_attr(const struct nlmsghdr *nlh, int attr)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	const int maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	struct nlattr *nla;

	nla = nla_find(nlmsg_attrdata(nlh, hdrlen), nlmsg_attrlen(nlh, hdrlen),
		       DRBD_NLA_CFG_CONTEXT);
	if (!nla)
		return NULL;
	return drbd_nla_find_nested(maxtype, nla, __nla_type(attr));
}
static void resource_to_info(struct resource_info *, struct drbd_resource *);

int drbd_adm_dump_resources(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_genlmsghdr *dh;
	struct drbd_resource *resource;
	struct resource_info resource_info;
	struct resource_statistics resource_statistics;
	int err;

	rcu_read_lock();
	if (cb->args[0]) {
		for_each_resource_rcu(resource, &drbd_resources)
			if (resource == (struct drbd_resource *)cb->args[0])
				goto found_resource;
		err = 0;  /* resource was probably deleted */
		goto out;
	}
	resource = list_entry(&drbd_resources,
			      struct drbd_resource, resources);

found_resource:
	list_for_each_entry_continue_rcu(resource, &drbd_resources, resources) {
		goto put_result;
	}
	err = 0;
	goto out;

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, &drbd_genl_family,
			NLM_F_MULTI, DRBD_ADM_GET_RESOURCES);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	err = nla_put_drbd_cfg_context(skb, resource, NULL, NULL);
	if (err)
		goto out;
	err = res_opts_to_skb(skb, &resource->res_opts, !capable(CAP_SYS_ADMIN));
	if (err)
		goto out;
	resource_to_info(&resource_info, resource);
	err = resource_info_to_skb(skb, &resource_info, !capable(CAP_SYS_ADMIN));
	if (err)
		goto out;
	resource_statistics.res_stat_write_ordering = resource->write_ordering;
	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
	if (err)
		goto out;
	cb->args[0] = (long)resource;
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (err)
		return err;
	return skb->len;
}
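/* Fill a device_statistics reply from the current counters and, if a local
 * disk is attached, from its meta data. */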
static void device_to_statistics(struct device_statistics *s,
				 struct drbd_device *device)
{
	memset(s, 0, sizeof(*s));
	s->dev_upper_blocked = !may_inc_ap_bio(device);
	if (get_ldev(device)) {
		struct drbd_md *md = &device->ldev->md;
		u64 *history_uuids = (u64 *)s->history_uuids;
		struct request_queue *q;
		int n;

		spin_lock_irq(&md->uuid_lock);
		s->dev_current_uuid = md->uuid[UI_CURRENT];
		BUILD_BUG_ON(sizeof(s->history_uuids) < UI_HISTORY_END - UI_HISTORY_START + 1);
		for (n = 0; n < UI_HISTORY_END - UI_HISTORY_START + 1; n++)
			history_uuids[n] = md->uuid[UI_HISTORY_START + n];
		for (; n < HISTORY_UUIDS; n++)
			history_uuids[n] = 0;
		s->history_uuids_len = HISTORY_UUIDS;
		spin_unlock_irq(&md->uuid_lock);

		s->dev_disk_flags = md->flags;
		q = bdev_get_queue(device->ldev->backing_bdev);
		s->dev_lower_blocked =
			bdi_congested(&q->backing_dev_info,
				      (1 << WB_async_congested) |
				      (1 << WB_sync_congested));
		put_ldev(device);
	}
	s->dev_size = drbd_get_capacity(device->this_bdev);
	s->dev_read = device->read_cnt;
	s->dev_write = device->writ_cnt;
	s->dev_al_writes = device->al_writ_cnt;
	s->dev_bm_writes = device->bm_writ_cnt;
	s->dev_upper_pending = atomic_read(&device->ap_bio_cnt);
	s->dev_lower_pending = atomic_read(&device->local_cnt);
	s->dev_al_suspended = test_bit(AL_SUSPENDED, &device->flags);
	s->dev_exposed_data_uuid = device->ed_uuid;
}
static int put_resource_in_arg0(struct netlink_callback *cb, int holder_nr)
{
	if (cb->args[0]) {
		struct drbd_resource *resource =
			(struct drbd_resource *)cb->args[0];
		kref_put(&resource->kref, drbd_destroy_resource);
	}

	return 0;
}

int drbd_adm_dump_devices_done(struct netlink_callback *cb) {
	return put_resource_in_arg0(cb, 7);
}

static void device_to_info(struct device_info *, struct drbd_device *);
int drbd_adm_dump_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *resource_filter;
	struct drbd_resource *resource;
	struct drbd_device *uninitialized_var(device);
	int minor, err, retcode;
	struct drbd_genlmsghdr *dh;
	struct device_info device_info;
	struct device_statistics device_statistics;
	struct idr *idr_to_search;

	resource = (struct drbd_resource *)cb->args[0];
	if (!cb->args[0] && !cb->args[1]) {
		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
		if (resource_filter) {
			retcode = ERR_RES_NOT_KNOWN;
			resource = drbd_find_resource(nla_data(resource_filter));
			if (!resource)
				goto put_result;
			cb->args[0] = (long)resource;
		}
	}

	rcu_read_lock();
	minor = cb->args[1];
	idr_to_search = resource ? &resource->devices : &drbd_devices;
	device = idr_get_next(idr_to_search, &minor);
	if (!device) {
		err = 0;
		goto out;
	}
	idr_for_each_entry_continue(idr_to_search, device, minor) {
		retcode = NO_ERROR;
		goto put_result;  /* only one iteration */
	}
	err = 0;
	goto out;  /* no more devices */

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, &drbd_genl_family,
			NLM_F_MULTI, DRBD_ADM_GET_DEVICES);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->ret_code = retcode;
	dh->minor = -1U;
	if (retcode == NO_ERROR) {
		dh->minor = device->minor;
		err = nla_put_drbd_cfg_context(skb, device->resource, NULL, device);
		if (err)
			goto out;
		if (get_ldev(device)) {
			struct disk_conf *disk_conf =
				rcu_dereference(device->ldev->disk_conf);

			err = disk_conf_to_skb(skb, disk_conf, !capable(CAP_SYS_ADMIN));
			put_ldev(device);
			if (err)
				goto out;
		}
		device_to_info(&device_info, device);
		err = device_info_to_skb(skb, &device_info, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;

		device_to_statistics(&device_statistics, device);
		err = device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		cb->args[1] = minor + 1;
	}
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (err)
		return err;
	return skb->len;
}
int drbd_adm_dump_connections_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 6);
}

enum { SINGLE_RESOURCE, ITERATE_RESOURCES };

int drbd_adm_dump_connections(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *resource_filter;
	struct drbd_resource *resource = NULL, *next_resource;
	struct drbd_connection *uninitialized_var(connection);
	int err = 0, retcode;
	struct drbd_genlmsghdr *dh;
	struct connection_info connection_info;
	struct connection_statistics connection_statistics;

	rcu_read_lock();
	resource = (struct drbd_resource *)cb->args[0];
	if (!cb->args[0]) {
		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
		if (resource_filter) {
			retcode = ERR_RES_NOT_KNOWN;
			resource = drbd_find_resource(nla_data(resource_filter));
			if (!resource)
				goto put_result;
			cb->args[0] = (long)resource;
			cb->args[1] = SINGLE_RESOURCE;
		}
	}
	if (!resource) {
		if (list_empty(&drbd_resources))
			goto out;
		resource = list_first_entry(&drbd_resources, struct drbd_resource, resources);
		kref_get(&resource->kref);
		cb->args[0] = (long)resource;
		cb->args[1] = ITERATE_RESOURCES;
	}

next_resource:
	rcu_read_unlock();
	mutex_lock(&resource->conf_update);
	rcu_read_lock();
	if (cb->args[2]) {
		for_each_connection_rcu(connection, resource)
			if (connection == (struct drbd_connection *)cb->args[2])
				goto found_connection;
		/* connection was probably deleted */
		goto no_more_connections;
	}
	connection = list_entry(&resource->connections, struct drbd_connection, connections);

found_connection:
	list_for_each_entry_continue_rcu(connection, &resource->connections, connections) {
		if (!has_net_conf(connection))
			continue;
		retcode = NO_ERROR;
		goto put_result;  /* only one iteration */
	}

no_more_connections:
	if (cb->args[1] == ITERATE_RESOURCES) {
		for_each_resource_rcu(next_resource, &drbd_resources) {
			if (next_resource == resource)
				goto found_resource;
		}
		/* resource was probably deleted */
	}
	goto out;

found_resource:
	list_for_each_entry_continue_rcu(next_resource, &drbd_resources, resources) {
		mutex_unlock(&resource->conf_update);
		kref_put(&resource->kref, drbd_destroy_resource);
		resource = next_resource;
		kref_get(&resource->kref);
		cb->args[0] = (long)resource;
		cb->args[2] = 0;
		goto next_resource;
	}
	goto out;  /* no more resources */

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, &drbd_genl_family,
			NLM_F_MULTI, DRBD_ADM_GET_CONNECTIONS);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->ret_code = retcode;
	dh->minor = -1U;
	if (retcode == NO_ERROR) {
		struct net_conf *net_conf;

		err = nla_put_drbd_cfg_context(skb, resource, connection, NULL);
		if (err)
			goto out;
		net_conf = rcu_dereference(connection->net_conf);
		if (net_conf) {
			err = net_conf_to_skb(skb, net_conf, !capable(CAP_SYS_ADMIN));
			if (err)
				goto out;
		}
		connection_to_info(&connection_info, connection);
		err = connection_info_to_skb(skb, &connection_info, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
		err = connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		cb->args[2] = (long)connection;
	}
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (resource)
		mutex_unlock(&resource->conf_update);
	if (err)
		return err;
	return skb->len;
}
enum mdf_peer_flag {
	MDF_PEER_CONNECTED =	1 << 0,
	MDF_PEER_OUTDATED =	1 << 1,
	MDF_PEER_FENCING =	1 << 2,
	MDF_PEER_FULL_SYNC =	1 << 3,
};
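/* Fill a peer_device_statistics reply from the replication counters and the
 * peer-related meta data flags. */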
static void peer_device_to_statistics(struct peer_device_statistics *s,
				      struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;

	memset(s, 0, sizeof(*s));
	s->peer_dev_received = device->recv_cnt;
	s->peer_dev_sent = device->send_cnt;
	s->peer_dev_pending = atomic_read(&device->ap_pending_cnt) +
			      atomic_read(&device->rs_pending_cnt);
	s->peer_dev_unacked = atomic_read(&device->unacked_cnt);
	s->peer_dev_out_of_sync = drbd_bm_total_weight(device) << (BM_BLOCK_SHIFT - 9);
	s->peer_dev_resync_failed = device->rs_failed << (BM_BLOCK_SHIFT - 9);
	if (get_ldev(device)) {
		struct drbd_md *md = &device->ldev->md;

		spin_lock_irq(&md->uuid_lock);
		s->peer_dev_bitmap_uuid = md->uuid[UI_BITMAP];
		spin_unlock_irq(&md->uuid_lock);
		s->peer_dev_flags =
			(drbd_md_test_flag(device->ldev, MDF_CONNECTED_IND) ?
				MDF_PEER_CONNECTED : 0) +
			(drbd_md_test_flag(device->ldev, MDF_CONSISTENT) &&
			 !drbd_md_test_flag(device->ldev, MDF_WAS_UP_TO_DATE) ?
				MDF_PEER_OUTDATED : 0) +
			/* FIXME: MDF_PEER_FENCING? */
			(drbd_md_test_flag(device->ldev, MDF_FULL_SYNC) ?
				MDF_PEER_FULL_SYNC : 0);
		put_ldev(device);
	}
}
int drbd_adm_dump_peer_devices_done(struct netlink_callback *cb)
{
	return put_resource_in_arg0(cb, 9);
}
int drbd_adm_dump_peer_devices(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct nlattr *resource_filter;
	struct drbd_resource *resource;
	struct drbd_device *uninitialized_var(device);
	struct drbd_peer_device *peer_device = NULL;
	int minor, err, retcode;
	struct drbd_genlmsghdr *dh;
	struct idr *idr_to_search;

	resource = (struct drbd_resource *)cb->args[0];
	if (!cb->args[0] && !cb->args[1]) {
		resource_filter = find_cfg_context_attr(cb->nlh, T_ctx_resource_name);
		if (resource_filter) {
			retcode = ERR_RES_NOT_KNOWN;
			resource = drbd_find_resource(nla_data(resource_filter));
			if (!resource)
				goto put_result;
		}
		cb->args[0] = (long)resource;
	}

	rcu_read_lock();
	minor = cb->args[1];
	idr_to_search = resource ? &resource->devices : &drbd_devices;
	device = idr_find(idr_to_search, minor);
	if (!device) {
next_device:
		minor++;
		cb->args[2] = 0;
		device = idr_get_next(idr_to_search, &minor);
		if (!device) {
			err = 0;
			goto out;
		}
	}
	if (cb->args[2]) {
		for_each_peer_device(peer_device, device)
			if (peer_device == (struct drbd_peer_device *)cb->args[2])
				goto found_peer_device;
		/* peer device was probably deleted */
		goto next_device;
	}
	/* Make peer_device point to the list head (not the first entry). */
	peer_device = list_entry(&device->peer_devices, struct drbd_peer_device, peer_devices);

found_peer_device:
	list_for_each_entry_continue_rcu(peer_device, &device->peer_devices, peer_devices) {
		if (!has_net_conf(peer_device->connection))
			continue;
		retcode = NO_ERROR;
		goto put_result;  /* only one iteration */
	}
	goto next_device;

put_result:
	dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
			cb->nlh->nlmsg_seq, &drbd_genl_family,
			NLM_F_MULTI, DRBD_ADM_GET_PEER_DEVICES);
	err = -ENOMEM;
	if (!dh)
		goto out;
	dh->ret_code = retcode;
	dh->minor = -1U;
	if (retcode == NO_ERROR) {
		struct peer_device_info peer_device_info;
		struct peer_device_statistics peer_device_statistics;

		dh->minor = minor;
		err = nla_put_drbd_cfg_context(skb, device->resource, peer_device->connection, device);
		if (err)
			goto out;
		peer_device_to_info(&peer_device_info, peer_device);
		err = peer_device_info_to_skb(skb, &peer_device_info, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		peer_device_to_statistics(&peer_device_statistics, peer_device);
		err = peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
		if (err)
			goto out;
		cb->args[1] = minor;
		cb->args[2] = (long)peer_device;
	}
	genlmsg_end(skb, dh);
	err = 0;

out:
	rcu_read_unlock();
	if (err)
		return err;
	return skb->len;
}
/*
 * Return the connection of @resource if @resource has exactly one connection.
 */
static struct drbd_connection *the_only_connection(struct drbd_resource *resource)
{
	struct list_head *connections = &resource->connections;

	if (list_empty(connections) || connections->next->next != connections)
		return NULL;
	return list_first_entry(&resource->connections, struct drbd_connection, connections);
}
static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
		const struct sib_info *sib)
{
	struct drbd_resource *resource = device->resource;
	struct state_info *si = NULL; /* for sizeof(si->member); */
	struct nlattr *nla;
	int got_ldev;
	int err = 0;
	int exclude_sensitive;

	/* If sib != NULL, this is drbd_bcast_event, which anyone can listen
	 * to. So we better exclude_sensitive information.
	 *
	 * If sib == NULL, this is drbd_adm_get_status, executed synchronously
	 * in the context of the requesting user process. Exclude sensitive
	 * information, unless current has superuser.
	 *
	 * NOTE: for drbd_adm_get_status_all(), this is a netlink dump, and
	 * relies on the current implementation of netlink_dump(), which
	 * executes the dump callback successively from netlink_recvmsg(),
	 * always in the context of the receiving process */
	exclude_sensitive = sib || !capable(CAP_SYS_ADMIN);

	got_ldev = get_ldev(device);

	/* We need to add connection name and volume number information still.
	 * Minor number is in drbd_genlmsghdr. */
	if (nla_put_drbd_cfg_context(skb, resource, the_only_connection(resource), device))
		goto nla_put_failure;

	if (res_opts_to_skb(skb, &device->resource->res_opts, exclude_sensitive))
		goto nla_put_failure;

	rcu_read_lock();
	if (got_ldev) {
		struct disk_conf *disk_conf;

		disk_conf = rcu_dereference(device->ldev->disk_conf);
		err = disk_conf_to_skb(skb, disk_conf, exclude_sensitive);
	}
	if (!err) {
		struct net_conf *nc;

		nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
		if (nc)
			err = net_conf_to_skb(skb, nc, exclude_sensitive);
	}
	rcu_read_unlock();
	if (err)
		goto nla_put_failure;

	nla = nla_nest_start(skb, DRBD_NLA_STATE_INFO);
	if (!nla)
		goto nla_put_failure;
	if (nla_put_u32(skb, T_sib_reason, sib ? sib->sib_reason : SIB_GET_STATUS_REPLY) ||
	    nla_put_u32(skb, T_current_state, device->state.i) ||
	    nla_put_u64(skb, T_ed_uuid, device->ed_uuid) ||
	    nla_put_u64(skb, T_capacity, drbd_get_capacity(device->this_bdev)) ||
	    nla_put_u64(skb, T_send_cnt, device->send_cnt) ||
	    nla_put_u64(skb, T_recv_cnt, device->recv_cnt) ||
	    nla_put_u64(skb, T_read_cnt, device->read_cnt) ||
	    nla_put_u64(skb, T_writ_cnt, device->writ_cnt) ||
	    nla_put_u64(skb, T_al_writ_cnt, device->al_writ_cnt) ||
	    nla_put_u64(skb, T_bm_writ_cnt, device->bm_writ_cnt) ||
	    nla_put_u32(skb, T_ap_bio_cnt, atomic_read(&device->ap_bio_cnt)) ||
	    nla_put_u32(skb, T_ap_pending_cnt, atomic_read(&device->ap_pending_cnt)) ||
	    nla_put_u32(skb, T_rs_pending_cnt, atomic_read(&device->rs_pending_cnt)))
		goto nla_put_failure;

	if (got_ldev) {
		int err;

		spin_lock_irq(&device->ldev->md.uuid_lock);
		err = nla_put(skb, T_uuids, sizeof(si->uuids), device->ldev->md.uuid);
		spin_unlock_irq(&device->ldev->md.uuid_lock);

		if (err)
			goto nla_put_failure;

		if (nla_put_u32(skb, T_disk_flags, device->ldev->md.flags) ||
		    nla_put_u64(skb, T_bits_total, drbd_bm_bits(device)) ||
		    nla_put_u64(skb, T_bits_oos, drbd_bm_total_weight(device)))
			goto nla_put_failure;
		if (C_SYNC_SOURCE <= device->state.conn &&
		    C_PAUSED_SYNC_T >= device->state.conn) {
			if (nla_put_u64(skb, T_bits_rs_total, device->rs_total) ||
			    nla_put_u64(skb, T_bits_rs_failed, device->rs_failed))
				goto nla_put_failure;
		}
	}

	if (sib) {
		switch(sib->sib_reason) {
		case SIB_SYNC_PROGRESS:
		case SIB_GET_STATUS_REPLY:
			break;
		case SIB_STATE_CHANGE:
			if (nla_put_u32(skb, T_prev_state, sib->os.i) ||
			    nla_put_u32(skb, T_new_state, sib->ns.i))
				goto nla_put_failure;
			break;
		case SIB_HELPER_POST:
			if (nla_put_u32(skb, T_helper_exit_code,
					sib->helper_exit_code))
				goto nla_put_failure;
			/* fall through */
		case SIB_HELPER_PRE:
			if (nla_put_string(skb, T_helper, sib->helper_name))
				goto nla_put_failure;
			break;
		}
	}
	nla_nest_end(skb, nla);

	if (0)
nla_put_failure:
		err = -EMSGSIZE;
	if (got_ldev)
		put_ldev(device);
	return err;
}
int drbd_adm_get_status(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	err = nla_put_status_info(adm_ctx.reply_skb, adm_ctx.device, NULL);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
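/* Dump callback used by drbd_adm_get_status_all(): emits the status of one
 * volume per invocation and advances the resource/volume iterators that are
 * kept in cb->args. */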
static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_device *device;
	struct drbd_genlmsghdr *dh;
	struct drbd_resource *pos = (struct drbd_resource *)cb->args[0];
	struct drbd_resource *resource = NULL;
	struct drbd_resource *tmp;
	unsigned volume = cb->args[1];

	/* Open coded, deferred, iteration:
	 * for_each_resource_safe(resource, tmp, &drbd_resources) {
	 *	connection = "first connection of resource or undefined";
	 *	idr_for_each_entry(&resource->devices, device, i) {
	 *	  ...
	 *	}
	 * }
	 * where resource is cb->args[0];
	 * and i is cb->args[1];
	 *
	 * cb->args[2] indicates if we shall loop over all resources,
	 * or just dump all volumes of a single resource.
	 *
	 * This may miss entries inserted after this dump started,
	 * or entries deleted before they are reached.
	 *
	 * We need to make sure the device won't disappear while
	 * we are looking at it, and revalidate our iterators
	 * on each iteration.
	 */

	/* synchronize with conn_create()/drbd_destroy_connection() */
	rcu_read_lock();
	/* revalidate iterator position */
	for_each_resource_rcu(tmp, &drbd_resources) {
		if (pos == NULL) {
			/* first iteration */
			pos = tmp;
			resource = tmp;
			break;
		}
		if (tmp == pos) {
			resource = pos;
			break;
		}
	}
	if (resource) {
next_resource:
		device = idr_get_next(&resource->devices, &volume);
		if (!device) {
			/* No more volumes to dump on this resource.
			 * Advance resource iterator. */
			pos = list_entry_rcu(resource->resources.next,
					     struct drbd_resource, resources);
			/* Did we dump any volume of this resource yet? */
			if (volume != 0) {
				/* If we reached the end of the list,
				 * or only a single resource dump was requested,
				 * we are done. */
				if (&pos->resources == &drbd_resources || cb->args[2])
					goto out;
				volume = 0;
				resource = pos;
				goto next_resource;
			}
		}

		dh = genlmsg_put(skb, NETLINK_CB(cb->skb).portid,
				cb->nlh->nlmsg_seq, &drbd_genl_family,
				NLM_F_MULTI, DRBD_ADM_GET_STATUS);
		if (!dh)
			goto out;

		if (!device) {
			/* This is a connection without a single volume.
			 * Suprisingly enough, it may have a network
			 * configuration. */
			struct drbd_connection *connection;

			dh->minor = -1U;
			dh->ret_code = NO_ERROR;
			connection = the_only_connection(resource);
			if (nla_put_drbd_cfg_context(skb, resource, connection, NULL))
				goto cancel;
			if (connection) {
				struct net_conf *nc;

				nc = rcu_dereference(connection->net_conf);
				if (nc && net_conf_to_skb(skb, nc, 1) != 0)
					goto cancel;
			}
			goto done;
		}

		D_ASSERT(device, device->vnr == volume);
		D_ASSERT(device, device->resource == resource);

		dh->minor = device_to_minor(device);
		dh->ret_code = NO_ERROR;

		if (nla_put_status_info(skb, device, NULL)) {
cancel:
			genlmsg_cancel(skb, dh);
			goto out;
		}
done:
		genlmsg_end(skb, dh);
	}

out:
	rcu_read_unlock();
	/* where to start the next iteration */
	cb->args[0] = (long)pos;
	cb->args[1] = (pos == resource) ? volume + 1 : 0;

	/* No more resources/volumes/minors found results in an empty skb.
	 * Which will terminate the dump. */
	return skb->len;
}
/*
 * Request status of all resources, or of all volumes within a single resource.
 *
 * This is a dump, as the answer may not fit in a single reply skb otherwise.
 * Which means we cannot use the family->attrbuf or other such members, because
 * dump is NOT protected by the genl_lock().  During dump, we only have access
 * to the incoming skb, and need to opencode "parsing" of the nlattr payload.
 *
 * Once things are setup properly, we call into get_one_status().
 */
int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
{
	const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
	struct nlattr *nla;
	const char *resource_name;
	struct drbd_resource *resource;
	int maxtype;

	/* Is this a followup call? */
	if (cb->args[0]) {
		/* ... of a single resource dump,
		 * and the resource iterator has been advanced already? */
		if (cb->args[2] && cb->args[2] != cb->args[0])
			return 0; /* DONE. */
		goto dump;
	}

	/* First call (from netlink_dump_start).  We need to figure out
	 * which resource(s) the user wants us to dump. */
	nla = nla_find(nlmsg_attrdata(cb->nlh, hdrlen),
			nlmsg_attrlen(cb->nlh, hdrlen),
			DRBD_NLA_CFG_CONTEXT);

	/* No explicit context given.  Dump all. */
	if (!nla)
		goto dump;
	maxtype = ARRAY_SIZE(drbd_cfg_context_nl_policy) - 1;
	nla = drbd_nla_find_nested(maxtype, nla, __nla_type(T_ctx_resource_name));
	if (IS_ERR(nla))
		return PTR_ERR(nla);
	/* context given, but no name present? */
	if (!nla)
		goto dump;
	resource_name = nla_data(nla);
	if (!*resource_name)
		return -ENODEV;
	resource = drbd_find_resource(resource_name);
	if (!resource)
		return -ENODEV;

	kref_put(&resource->kref, drbd_destroy_resource); /* get_one_status() revalidates the resource */

	/* prime iterators, and set "filter" mode mark:
	 * only dump this connection. */
	cb->args[0] = (long)resource;
	/* cb->args[1] = 0; passed in this way. */
	cb->args[2] = (long)resource;

dump:
	return get_one_status(skb, cb);
}
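/*
 * Usage note (sketch, attribute construction via libnl is assumed and not
 * shown here): a dump request without a DRBD_NLA_CFG_CONTEXT attribute
 * returns every resource, while a request that nests T_ctx_resource_name
 * (e.g. "r0") inside DRBD_NLA_CFG_CONTEXT is limited to that resource;
 * that is what primes cb->args[2] as the "filter" mark above.
 */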
int drbd_adm_get_timeout_type(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct timeout_parms tp;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	tp.timeout_type =
		adm_ctx.device->state.pdsk == D_OUTDATED ? UT_PEER_OUTDATED :
		test_bit(USE_DEGR_WFC_T, &adm_ctx.device->flags) ? UT_DEGRADED :
		UT_DEFAULT;

	err = timeout_parms_to_priv_skb(adm_ctx.reply_skb, &tp);
	if (err) {
		nlmsg_free(adm_ctx.reply_skb);
		return err;
	}
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
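/*
 * Summary of the ternary chain above:
 *	peer disk is D_OUTDATED		-> UT_PEER_OUTDATED
 *	USE_DEGR_WFC_T flag is set	-> UT_DEGRADED
 *	otherwise			-> UT_DEFAULT
 * Userspace uses this to pick the matching wait-for-connection timeout.
 */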
int drbd_adm_start_ov(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	struct start_ov_parms parms;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	device = adm_ctx.device;

	/* resume from last known position, if possible */
	parms.ov_start_sector = device->ov_start_sector;
	parms.ov_stop_sector = ULLONG_MAX;
	if (info->attrs[DRBD_NLA_START_OV_PARMS]) {
		int err = start_ov_parms_from_attrs(&parms, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out;
		}
	}
	mutex_lock(&adm_ctx.resource->adm_mutex);

	/* w_make_ov_request expects position to be aligned */
	device->ov_start_sector = parms.ov_start_sector & ~(BM_SECT_PER_BIT-1);
	device->ov_stop_sector = parms.ov_stop_sector;

	/* If there is still bitmap IO pending, e.g. previous resync or verify
	 * just being finished, wait for it before requesting a new resync. */
	drbd_suspend_io(device);
	wait_event(device->misc_wait, !test_bit(BITMAP_IO, &device->flags));
	retcode = drbd_request_state(device, NS(conn, C_VERIFY_S));
	drbd_resume_io(device);

	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
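/*
 * Alignment example (illustration only): assuming the usual 4 KiB bitmap
 * granularity and 512-byte sectors, BM_SECT_PER_BIT is 8, so a requested
 * ov_start_sector of 1000005 is rounded down to 1000000 by the
 * "& ~(BM_SECT_PER_BIT-1)" above; online verify always starts on a
 * bitmap-bit boundary.
 */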
int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_device *device;
	enum drbd_ret_code retcode;
	int skip_initial_sync = 0;
	int err;
	struct new_c_uuid_parms args;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out_nolock;

	device = adm_ctx.device;
	memset(&args, 0, sizeof(args));
	if (info->attrs[DRBD_NLA_NEW_C_UUID_PARMS]) {
		err = new_c_uuid_parms_from_attrs(&args, info);
		if (err) {
			retcode = ERR_MANDATORY_TAG;
			drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
			goto out_nolock;
		}
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	mutex_lock(device->state_mutex); /* Protects us against serialized state changes. */

	if (!get_ldev(device)) {
		retcode = ERR_NO_DISK;
		goto out;
	}

	/* this is "skip initial sync", assume to be clean */
	if (device->state.conn == C_CONNECTED &&
	    first_peer_device(device)->connection->agreed_pro_version >= 90 &&
	    device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
		drbd_info(device, "Preparing to skip initial sync\n");
		skip_initial_sync = 1;
	} else if (device->state.conn != C_STANDALONE) {
		retcode = ERR_CONNECTED;
		goto out_dec;
	}

	drbd_uuid_set(device, UI_BITMAP, 0); /* Rotate UI_BITMAP to History 1, etc... */
	drbd_uuid_new_current(device); /* New current, previous to UI_BITMAP */

	if (args.clear_bm) {
		err = drbd_bitmap_io(device, &drbd_bmio_clear_n_write,
			"clear_n_write from new_c_uuid", BM_LOCKED_MASK);
		if (err) {
			drbd_err(device, "Writing bitmap failed with %d\n", err);
			retcode = ERR_IO_MD_DISK;
		}
		if (skip_initial_sync) {
			drbd_send_uuids_skip_initial_sync(first_peer_device(device));
			_drbd_uuid_set(device, UI_BITMAP, 0);
			drbd_print_uuids(device, "cleared bitmap UUID");
			spin_lock_irq(&device->resource->req_lock);
			_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
					CS_VERBOSE, NULL);
			spin_unlock_irq(&device->resource->req_lock);
		}
	}

	drbd_md_sync(device);
out_dec:
	put_ldev(device);
out:
	mutex_unlock(device->state_mutex);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out_nolock:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
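/*
 * Usage note (not from the original source): this handler is typically
 * reached via "drbdadm new-current-uuid --clear-bitmap" on a freshly
 * created, connected pair; with an agreed protocol version >= 90 and a
 * just-created current UUID it skips the initial full sync by clearing
 * the bitmap and declaring both disks D_UP_TO_DATE.
 */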
static enum drbd_ret_code
drbd_check_resource_name(struct drbd_config_context *adm_ctx)
{
	const char *name = adm_ctx->resource_name;
	if (!name || !name[0]) {
		drbd_msg_put_info(adm_ctx->reply_skb, "resource name missing");
		return ERR_MANDATORY_TAG;
	}
	/* if we want to use these in sysfs/configfs/debugfs some day,
	 * we must not allow slashes */
	if (strchr(name, '/')) {
		drbd_msg_put_info(adm_ctx->reply_skb, "invalid resource name");
		return ERR_INVALID_REQUEST;
	}
	return NO_ERROR;
}
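/*
 * Examples (illustration only): "r0" or "web-data" pass this check; an
 * empty name or "a/b" is rejected, because the name may one day be used
 * as a sysfs/configfs/debugfs directory component.
 */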
static void resource_to_info(struct resource_info *info,
			     struct drbd_resource *resource)
{
	info->res_role = conn_highest_role(first_connection(resource));
	info->res_susp = resource->susp;
	info->res_susp_nod = resource->susp_nod;
	info->res_susp_fen = resource->susp_fen;
}
int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_connection *connection;
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;
	struct res_opts res_opts;
	int err;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, 0);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	set_res_opts_defaults(&res_opts);
	err = res_opts_from_attrs(&res_opts, info);
	if (err && err != -ENOMSG) {
		retcode = ERR_MANDATORY_TAG;
		drbd_msg_put_info(adm_ctx.reply_skb, from_attrs_err_to_txt(err));
		goto out;
	}

	retcode = drbd_check_resource_name(&adm_ctx);
	if (retcode != NO_ERROR)
		goto out;

	if (adm_ctx.resource) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
			retcode = ERR_INVALID_REQUEST;
			drbd_msg_put_info(adm_ctx.reply_skb, "resource exists");
		}
		/* else: still NO_ERROR */
		goto out;
	}

	/* not yet safe for genl_family.parallel_ops */
	mutex_lock(&resources_mutex);
	connection = conn_create(adm_ctx.resource_name, &res_opts);
	mutex_unlock(&resources_mutex);

	if (connection) {
		struct resource_info resource_info;

		mutex_lock(&notification_mutex);
		resource_to_info(&resource_info, connection->resource);
		notify_resource_state(NULL, 0, connection->resource,
				      &resource_info, NOTIFY_CREATE);
		mutex_unlock(&notification_mutex);
	} else
		retcode = ERR_NOMEM;

out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
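/*
 * Semantics note (sketch): re-creating an existing resource is not an
 * error by itself; only a request carrying NLM_F_EXCL fails with
 * ERR_INVALID_REQUEST, roughly analogous to open(2) with O_CREAT|O_EXCL.
 * That is what the adm_ctx.resource branch above implements.
 */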
static void device_to_info(struct device_info *info,
			   struct drbd_device *device)
{
	info->dev_disk_state = device->state.disk;
}
int drbd_adm_new_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_genlmsghdr *dh = info->userhdr;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	if (dh->minor > MINORMASK) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested minor out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}
	if (adm_ctx.volume > DRBD_VOLUME_MAX) {
		drbd_msg_put_info(adm_ctx.reply_skb, "requested volume id out of range");
		retcode = ERR_INVALID_REQUEST;
		goto out;
	}

	/* drbd_adm_prepare made sure already
	 * that first_peer_device(device)->connection and device->vnr match the request. */
	if (adm_ctx.device) {
		if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
			retcode = ERR_MINOR_OR_VOLUME_EXISTS;
		/* else: still NO_ERROR */
		goto out;
	}

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = drbd_create_device(&adm_ctx, dh->minor);
	if (retcode == NO_ERROR) {
		struct drbd_device *device;
		struct drbd_peer_device *peer_device;
		struct device_info info;
		unsigned int peer_devices = 0;
		enum drbd_notification_type flags;

		device = minor_to_device(dh->minor);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			peer_devices++;
		}

		device_to_info(&info, device);
		mutex_lock(&notification_mutex);
		flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
		notify_device_state(NULL, 0, device, &info, NOTIFY_CREATE | flags);
		for_each_peer_device(peer_device, device) {
			struct peer_device_info peer_device_info;

			if (!has_net_conf(peer_device->connection))
				continue;
			peer_device_to_info(&peer_device_info, peer_device);
			flags = (peer_devices--) ? NOTIFY_CONTINUES : 0;
			notify_peer_device_state(NULL, 0, peer_device, &peer_device_info,
						 NOTIFY_CREATE | flags);
		}
		mutex_unlock(&notification_mutex);
	}
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
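/*
 * Worked example (illustration only): with two connections that have a
 * net_conf, peer_devices starts at 2; the device CREATE event and the
 * first peer-device event carry NOTIFY_CONTINUES, the last one does not,
 * so listeners can tell where this multi-part notification ends.
 */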
static enum drbd_ret_code
adm_del_minor(struct drbd_device *device)
{
	struct drbd_peer_device *peer_device;

	if (device->state.disk == D_DISKLESS &&
	    /* no need to be device->state.conn == C_STANDALONE &&
	     * we may want to delete a minor from a live replication group.
	     */
	    device->state.role == R_SECONDARY) {
		struct drbd_connection *connection =
			first_connection(device->resource);

		_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
				    CS_VERBOSE + CS_WAIT_COMPLETE);

		/* If the state engine hasn't stopped the sender thread yet, we
		 * need to flush the sender work queue before generating the
		 * DESTROY events here. */
		if (get_t_state(&connection->worker) == RUNNING)
			drbd_flush_workqueue(&connection->sender_work);

		mutex_lock(&notification_mutex);
		for_each_peer_device(peer_device, device) {
			if (!has_net_conf(peer_device->connection))
				continue;
			notify_peer_device_state(NULL, 0, peer_device, NULL,
						 NOTIFY_DESTROY | NOTIFY_CONTINUES);
		}
		notify_device_state(NULL, 0, device, NULL, NOTIFY_DESTROY);
		mutex_unlock(&notification_mutex);

		drbd_delete_device(device);
		return NO_ERROR;
	} else
		return ERR_MINOR_CONFIGURED;
}
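/*
 * Note: a minor may be deleted from a live replication group; it only has
 * to be diskless and Secondary locally. Any other state is reported back
 * to the caller as ERR_MINOR_CONFIGURED.
 */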
int drbd_adm_del_minor(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_MINOR);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto out;

	mutex_lock(&adm_ctx.resource->adm_mutex);
	retcode = adm_del_minor(adm_ctx.device);
	mutex_unlock(&adm_ctx.resource->adm_mutex);
out:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
static int adm_del_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection;

	for_each_connection(connection, resource) {
		if (connection->cstate > C_STANDALONE)
			return ERR_NET_CONFIGURED;
	}
	if (!idr_is_empty(&resource->devices))
		return ERR_RES_IN_USE;

	/* The state engine has stopped the sender thread, so we don't
	 * need to flush the sender work queue before generating the
	 * DESTROY event here. */
	mutex_lock(&notification_mutex);
	notify_resource_state(NULL, 0, resource, NULL, NOTIFY_DESTROY);
	mutex_unlock(&notification_mutex);

	mutex_lock(&resources_mutex);
	list_del_rcu(&resource->resources);
	mutex_unlock(&resources_mutex);
	/* Make sure all threads have actually stopped: state handling only
	 * does drbd_thread_stop_nowait(). */
	list_for_each_entry(connection, &resource->connections, connections)
		drbd_thread_stop(&connection->worker);
	drbd_free_resource(resource);
	return NO_ERROR;
}
int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	int retcode; /* enum drbd_ret_code rsp. enum drbd_state_rv */
	unsigned i;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;

	resource = adm_ctx.resource;
	mutex_lock(&resource->adm_mutex);
	/* demote */
	for_each_connection(connection, resource) {
		struct drbd_peer_device *peer_device;

		idr_for_each_entry(&connection->peer_devices, peer_device, i) {
			retcode = drbd_set_role(peer_device->device, R_SECONDARY, 0);
			if (retcode < SS_SUCCESS) {
				drbd_msg_put_info(adm_ctx.reply_skb, "failed to demote");
				goto out;
			}
		}

		retcode = conn_try_disconnect(connection, 0);
		if (retcode < SS_SUCCESS) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to disconnect");
			goto out;
		}
	}

	/* detach */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_detach(device, 0);
		if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to detach");
			goto out;
		}
	}

	/* delete volumes */
	idr_for_each_entry(&resource->devices, device, i) {
		retcode = adm_del_minor(device);
		if (retcode != NO_ERROR) {
			/* "can not happen" */
			drbd_msg_put_info(adm_ctx.reply_skb, "failed to delete volume");
			goto out;
		}
	}

	retcode = adm_del_resource(resource);
out:
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
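/*
 * Teardown order used above (summary): demote every volume to Secondary,
 * disconnect every connection, detach every backing disk, delete the
 * minors, and finally delete the resource itself; the first step that
 * fails aborts the "down" and is reported via the reply skb.
 */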
int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
{
	struct drbd_config_context adm_ctx;
	struct drbd_resource *resource;
	enum drbd_ret_code retcode;

	retcode = drbd_adm_prepare(&adm_ctx, skb, info, DRBD_ADM_NEED_RESOURCE);
	if (!adm_ctx.reply_skb)
		return retcode;
	if (retcode != NO_ERROR)
		goto finish;
	resource = adm_ctx.resource;

	mutex_lock(&resource->adm_mutex);
	retcode = adm_del_resource(resource);
	mutex_unlock(&resource->adm_mutex);
finish:
	drbd_adm_finish(&adm_ctx, info, retcode);
	return 0;
}
void drbd_bcast_event(struct drbd_device *device, const struct sib_info *sib)
{
	struct sk_buff *msg;
	struct drbd_genlmsghdr *d_out;
	unsigned seq;
	int err = -ENOMEM;

	seq = atomic_inc_return(&drbd_genl_seq);
	msg = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	if (!msg)
		goto failed;

	err = -EMSGSIZE;
	d_out = genlmsg_put(msg, 0, seq, &drbd_genl_family, 0, DRBD_EVENT);
	if (!d_out) /* cannot happen, but anyways. */
		goto nla_put_failure;
	d_out->minor = device_to_minor(device);
	d_out->ret_code = NO_ERROR;

	if (nla_put_status_info(msg, device, sib))
		goto nla_put_failure;
	genlmsg_end(msg, d_out);
	err = drbd_genl_multicast_events(msg, GFP_NOWAIT);
	/* msg has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto failed;

	return;

nla_put_failure:
	nlmsg_free(msg);
failed:
	drbd_err(device, "Error %d while broadcasting event. "
			"Event seq:%u sib_reason:%u\n",
			err, seq, sib->sib_reason);
}
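/*
 * Note: -ESRCH from drbd_genl_multicast_events() only means that nobody
 * is currently listening on the events multicast group; it is therefore
 * not treated as an error here, nor in the notify_*() helpers below.
 */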
static int nla_put_notification_header(struct sk_buff *msg,
				       enum drbd_notification_type type)
{
	struct drbd_notification_header nh = {
		.nh_type = type,
	};

	return drbd_notification_header_to_skb(msg, &nh, true);
}
void notify_resource_state(struct sk_buff *skb,
			   unsigned int seq,
			   struct drbd_resource *resource,
			   struct resource_info *resource_info,
			   enum drbd_notification_type type)
{
	struct resource_statistics resource_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_RESOURCE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, NULL, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     resource_info_to_skb(skb, resource_info, true)))
		goto nla_put_failure;
	resource_statistics.res_stat_write_ordering = resource->write_ordering;
	err = resource_statistics_to_skb(skb, &resource_statistics, !capable(CAP_SYS_ADMIN));
	if (err)
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
void notify_device_state(struct sk_buff *skb,
			 unsigned int seq,
			 struct drbd_device *device,
			 struct device_info *device_info,
			 enum drbd_notification_type type)
{
	struct device_statistics device_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = device->minor;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, device->resource, NULL, device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     device_info_to_skb(skb, device_info, true)))
		goto nla_put_failure;
	device_to_statistics(&device_statistics, device);
	device_statistics_to_skb(skb, &device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
void notify_connection_state(struct sk_buff *skb,
			     unsigned int seq,
			     struct drbd_connection *connection,
			     struct connection_info *connection_info,
			     enum drbd_notification_type type)
{
	struct connection_statistics connection_statistics;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_CONNECTION_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, connection->resource, connection, NULL) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     connection_info_to_skb(skb, connection_info, true)))
		goto nla_put_failure;
	connection_statistics.conn_congested = test_bit(NET_CONGESTED, &connection->flags);
	connection_statistics_to_skb(skb, &connection_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(connection, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
void notify_peer_device_state(struct sk_buff *skb,
			      unsigned int seq,
			      struct drbd_peer_device *peer_device,
			      struct peer_device_info *peer_device_info,
			      enum drbd_notification_type type)
{
	struct peer_device_statistics peer_device_statistics;
	struct drbd_resource *resource = peer_device->device->resource;
	struct drbd_genlmsghdr *dh;
	bool multicast = false;
	int err;

	if (!skb) {
		seq = atomic_inc_return(&notify_genl_seq);
		skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
		err = -ENOMEM;
		if (!skb)
			goto failed;
		multicast = true;
	}

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_PEER_DEVICE_STATE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_drbd_cfg_context(skb, resource, peer_device->connection, peer_device->device) ||
	    nla_put_notification_header(skb, type) ||
	    ((type & ~NOTIFY_FLAGS) != NOTIFY_DESTROY &&
	     peer_device_info_to_skb(skb, peer_device_info, true)))
		goto nla_put_failure;
	peer_device_to_statistics(&peer_device_statistics, peer_device);
	peer_device_statistics_to_skb(skb, &peer_device_statistics, !capable(CAP_SYS_ADMIN));
	genlmsg_end(skb, dh);
	if (multicast) {
		err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
		/* skb has been consumed or freed in netlink_broadcast() */
		if (err && err != -ESRCH)
			goto failed;
	}
	return;

nla_put_failure:
	nlmsg_free(skb);
failed:
	drbd_err(peer_device, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
void notify_helper(enum drbd_notification_type type,
		   struct drbd_device *device, struct drbd_connection *connection,
		   const char *name, int status)
{
	struct drbd_resource *resource = device ? device->resource : connection->resource;
	struct drbd_helper_info helper_info;
	unsigned int seq = atomic_inc_return(&notify_genl_seq);
	struct sk_buff *skb = NULL;
	struct drbd_genlmsghdr *dh;
	int err;

	strlcpy(helper_info.helper_name, name, sizeof(helper_info.helper_name));
	helper_info.helper_name_len = min(strlen(name), sizeof(helper_info.helper_name));
	helper_info.helper_status = status;

	skb = genlmsg_new(NLMSG_GOODSIZE, GFP_NOIO);
	err = -ENOMEM;
	if (!skb)
		goto fail;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_HELPER);
	if (!dh)
		goto fail;
	dh->minor = device ? device->minor : -1;
	dh->ret_code = NO_ERROR;
	mutex_lock(&notification_mutex);
	if (nla_put_drbd_cfg_context(skb, resource, connection, device) ||
	    nla_put_notification_header(skb, type) ||
	    drbd_helper_info_to_skb(skb, &helper_info, true))
		goto unlock_fail;
	genlmsg_end(skb, dh);
	err = drbd_genl_multicast_events(skb, GFP_NOWAIT);
	skb = NULL;
	/* skb has been consumed or freed in netlink_broadcast() */
	if (err && err != -ESRCH)
		goto unlock_fail;
	mutex_unlock(&notification_mutex);
	return;

unlock_fail:
	mutex_unlock(&notification_mutex);
fail:
	nlmsg_free(skb);
	drbd_err(resource, "Error %d while broadcasting event. Event seq:%u\n",
		 err, seq);
}
static void notify_initial_state_done(struct sk_buff *skb, unsigned int seq)
{
	struct drbd_genlmsghdr *dh;
	int err;

	err = -EMSGSIZE;
	dh = genlmsg_put(skb, 0, seq, &drbd_genl_family, 0, DRBD_INITIAL_STATE_DONE);
	if (!dh)
		goto nla_put_failure;
	dh->minor = -1U;
	dh->ret_code = NO_ERROR;
	if (nla_put_notification_header(skb, NOTIFY_EXISTS))
		goto nla_put_failure;
	genlmsg_end(skb, dh);
	return;

nla_put_failure:
	nlmsg_free(skb);
	pr_err("Error %d sending event. Event seq:%u\n", err, seq);
}
static void free_state_changes(struct list_head *list)
{
	while (!list_empty(list)) {
		struct drbd_state_change *state_change =
			list_first_entry(list, struct drbd_state_change, list);
		list_del(&state_change->list);
		forget_state_change(state_change);
	}
}
static unsigned int notifications_for_state_change(struct drbd_state_change *state_change)
{
	return 1 +
	       state_change->n_connections +
	       state_change->n_devices +
	       state_change->n_devices * state_change->n_connections;
}
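/*
 * Worked example (illustration only): counting the resource itself, a
 * resource with 1 connection and 2 volumes yields 1 + 1 + 2 + 2*1 = 6
 * notifications: the resource, the connection, both devices, and both
 * peer devices.
 */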
static int get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_state_change *state_change = (struct drbd_state_change *)cb->args[0];
	unsigned int seq = cb->args[2];
	unsigned int n;
	enum drbd_notification_type flags = 0;

	/* There is no need for taking notification_mutex here:  it doesn't
	   matter if the initial state events mix with later state change
	   events; we can always tell the events apart by the NOTIFY_EXISTS
	   flag. */

	cb->args[5]--;
	if (cb->args[5] == 1) {
		notify_initial_state_done(skb, seq);
		goto out;
	}
	n = cb->args[4]++;
	if (cb->args[4] < cb->args[3])
		flags |= NOTIFY_CONTINUES;
	if (n < 1) {
		notify_resource_state_change(skb, seq, state_change->resource,
					     NOTIFY_EXISTS | flags);
		goto next;
	}
	n--;
	if (n < state_change->n_connections) {
		notify_connection_state_change(skb, seq, &state_change->connections[n],
					       NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_connections;
	if (n < state_change->n_devices) {
		notify_device_state_change(skb, seq, &state_change->devices[n],
					   NOTIFY_EXISTS | flags);
		goto next;
	}
	n -= state_change->n_devices;
	if (n < state_change->n_devices * state_change->n_connections) {
		notify_peer_device_state_change(skb, seq, &state_change->peer_devices[n],
						NOTIFY_EXISTS | flags);
		goto next;
	}

next:
	if (cb->args[4] == cb->args[3]) {
		struct drbd_state_change *next_state_change =
			list_entry(state_change->list.next,
				   struct drbd_state_change, list);
		cb->args[0] = (long)next_state_change;
		cb->args[3] = notifications_for_state_change(next_state_change);
		cb->args[4] = 0;
	}
out:
	return skb->len;
}
int drbd_adm_get_initial_state(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct drbd_resource *resource;
	LIST_HEAD(head);

	if (cb->args[5] >= 1) {
		if (cb->args[5] > 1)
			return get_initial_state(skb, cb);
		if (cb->args[0]) {
			struct drbd_state_change *state_change =
				(struct drbd_state_change *)cb->args[0];

			/* connect list to head */
			list_add(&head, &state_change->list);
			free_state_changes(&head);
		}
		return 0;
	}

	cb->args[5] = 2;  /* number of iterations */
	mutex_lock(&resources_mutex);
	for_each_resource(resource, &drbd_resources) {
		struct drbd_state_change *state_change;

		state_change = remember_old_state(resource, GFP_KERNEL);
		if (!state_change) {
			if (!list_empty(&head))
				free_state_changes(&head);
			mutex_unlock(&resources_mutex);
			return -ENOMEM;
		}
		copy_old_to_new_state_change(state_change);
		list_add_tail(&state_change->list, &head);
		cb->args[5] += notifications_for_state_change(state_change);
	}
	mutex_unlock(&resources_mutex);

	if (!list_empty(&head)) {
		struct drbd_state_change *state_change =
			list_entry(head.next, struct drbd_state_change, list);
		cb->args[0] = (long)state_change;
		cb->args[3] = notifications_for_state_change(state_change);
		list_del(&head);	/* detach list from head */
	}

	cb->args[2] = cb->nlh->nlmsg_seq;
	return get_initial_state(skb, cb);
}
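/*
 * Summary (sketch): the first dump call snapshots the state of all
 * resources under resources_mutex, chains the snapshots through
 * cb->args[0], and then replays them one notification per dump step via
 * get_initial_state(); cb->args[5] counts the remaining steps, and the
 * sequence ends with the DRBD_INITIAL_STATE_DONE message.
 */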