/*
   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   Thanks to Carter Burden, Bart Grantham and Gennadiy Nerubayev
   from Logicworks, Inc. for making SDP replication support possible.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/jiffies.h>
#include <linux/drbd.h>
#include <linux/uaccess.h>
#include <asm/types.h>
#include <linux/ctype.h>
#include <linux/mutex.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/init.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#define __KERNEL_SYSCALLS__
#include <linux/unistd.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>

#include <linux/drbd_limits.h>
#include "drbd_int.h"
#include "drbd_protocol.h"
#include "drbd_req.h" /* only for _req_mod in tl_release and tl_clear */
#include "drbd_debugfs.h"
static DEFINE_MUTEX(drbd_main_mutex);
static int drbd_open(struct block_device *bdev, fmode_t mode);
static void drbd_release(struct gendisk *gd, fmode_t mode);
static void md_sync_timer_fn(struct timer_list *t);
static int w_bitmap_io(struct drbd_work *w, int unused);

MODULE_AUTHOR("Philipp Reisner <phil@linbit.com>, "
	      "Lars Ellenberg <lars@linbit.com>");
MODULE_DESCRIPTION("drbd - Distributed Replicated Block Device v" REL_VERSION);
MODULE_VERSION(REL_VERSION);
MODULE_LICENSE("GPL");
MODULE_PARM_DESC(minor_count, "Approximate number of drbd devices ("
		 __stringify(DRBD_MINOR_COUNT_MIN) "-" __stringify(DRBD_MINOR_COUNT_MAX) ")");
MODULE_ALIAS_BLOCKDEV_MAJOR(DRBD_MAJOR);
#include <linux/moduleparam.h>
/* thanks to these macros, if compiled into the kernel (not-module),
 * these become boot parameters (e.g., drbd.minor_count) */

#ifdef CONFIG_DRBD_FAULT_INJECTION
int drbd_enable_faults;
int drbd_fault_rate;
static int drbd_fault_count;
static int drbd_fault_devs;
/* bitmap of enabled faults */
module_param_named(enable_faults, drbd_enable_faults, int, 0664);
/* fault rate % value - applies to all enabled faults */
module_param_named(fault_rate, drbd_fault_rate, int, 0664);
/* count of faults inserted */
module_param_named(fault_count, drbd_fault_count, int, 0664);
/* bitmap of devices to insert faults on */
module_param_named(fault_devs, drbd_fault_devs, int, 0644);
#endif

/* module parameters we can keep static */
static bool drbd_allow_oos; /* allow_open_on_secondary */
static bool drbd_disable_sendpage;
MODULE_PARM_DESC(allow_oos, "DONT USE!");
module_param_named(allow_oos, drbd_allow_oos, bool, 0);
module_param_named(disable_sendpage, drbd_disable_sendpage, bool, 0644);

/* module parameters we share */
int drbd_proc_details; /* Detail level in proc drbd*/
module_param_named(proc_details, drbd_proc_details, int, 0644);

/* module parameters shared with defaults */
unsigned int drbd_minor_count = DRBD_MINOR_COUNT_DEF;
/* Module parameter for setting the user mode helper program
 * to run. Default is /sbin/drbdadm */
char drbd_usermode_helper[80] = "/sbin/drbdadm";
module_param_named(minor_count, drbd_minor_count, uint, 0444);
module_param_string(usermode_helper, drbd_usermode_helper, sizeof(drbd_usermode_helper), 0644);
/* in 2.6.x, our device mapping and config info contains our virtual gendisks
 * as member "struct gendisk *vdisk;"
 */
struct idr drbd_devices;
struct list_head drbd_resources;
struct mutex resources_mutex;

struct kmem_cache *drbd_request_cache;
struct kmem_cache *drbd_ee_cache;	/* peer requests */
struct kmem_cache *drbd_bm_ext_cache;	/* bitmap extents */
struct kmem_cache *drbd_al_ext_cache;	/* activity log extents */
mempool_t drbd_request_mempool;
mempool_t drbd_ee_mempool;
mempool_t drbd_md_io_page_pool;
struct bio_set drbd_md_io_bio_set;
struct bio_set drbd_io_bio_set;

/* I do not use a standard mempool, because:
   1) I want to hand out the pre-allocated objects first.
   2) I want to be able to interrupt sleeping allocation with a signal.
   Note: This is a single linked list, the next pointer is the private
	 member of struct page.
 */
struct page *drbd_pp_pool;
spinlock_t drbd_pp_lock;
wait_queue_head_t drbd_pp_wait;

DEFINE_RATELIMIT_STATE(drbd_ratelimit_state, 5 * HZ, 5);

static const struct block_device_operations drbd_ops = {
	.owner		= THIS_MODULE,
	.open		= drbd_open,
	.release	= drbd_release,
};

struct bio *bio_alloc_drbd(gfp_t gfp_mask)
{
	struct bio *bio;

	if (!bioset_initialized(&drbd_md_io_bio_set))
		return bio_alloc(gfp_mask, 1);

	bio = bio_alloc_bioset(gfp_mask, 1, &drbd_md_io_bio_set);
	if (!bio)
		return NULL;
	return bio;
}
/* When checking with sparse, and this is an inline function, sparse will
   give tons of false positives. When this is a real functions sparse works.
 */
int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
{
	int io_allowed;

	atomic_inc(&device->local_cnt);
	io_allowed = (device->state.disk >= mins);
	if (!io_allowed) {
		if (atomic_dec_and_test(&device->local_cnt))
			wake_up(&device->misc_wait);
	}
	return io_allowed;
}
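/* Callers normally go through the get_ldev()/put_ldev() wrappers built on
 * this helper; a sketched usage pattern (wrapper names as used elsewhere in
 * this file) is:
 *
 *	if (get_ldev(device)) {
 *		... device->ldev may be dereferenced here ...
 *		put_ldev(device);
 *	}
 */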
/**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
 * @connection:	DRBD connection.
 * @barrier_nr:	Expected identifier of the DRBD write barrier packet.
 * @set_size:	Expected number of requests before that barrier.
 *
 * In case the passed barrier_nr or set_size does not match the oldest
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
		unsigned int set_size)
{
	struct drbd_request *r;
	struct drbd_request *req = NULL;
	int expect_epoch = 0;
	int expect_size = 0;

	spin_lock_irq(&connection->resource->req_lock);

	/* find oldest not yet barrier-acked write request,
	 * count writes in its epoch. */
	list_for_each_entry(r, &connection->transfer_log, tl_requests) {
		const unsigned s = r->rq_state;
		if (!req) {
			if (!(s & RQ_NET_MASK))
				continue;
			req = r;
			expect_epoch = req->epoch;
			expect_size++;
		} else {
			if (r->epoch != expect_epoch)
				break;
			/* if (s & RQ_DONE): not expected */
			/* if (!(s & RQ_NET_MASK)): not expected */
			expect_size++;
		}
	}

	/* first some paranoia code */
	if (req == NULL) {
		drbd_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
			 barrier_nr);
		goto bail;
	}
	if (expect_epoch != barrier_nr) {
		drbd_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
			 barrier_nr, expect_epoch);
		goto bail;
	}

	if (expect_size != set_size) {
		drbd_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
			 barrier_nr, set_size, expect_size);
		goto bail;
	}

	/* Clean up list of requests processed during current epoch. */
	/* this extra list walk restart is paranoia,
	 * to catch requests being barrier-acked "unexpectedly".
	 * It usually should find the same req again, or some READ preceding it. */
	list_for_each_entry(req, &connection->transfer_log, tl_requests)
		if (req->epoch == expect_epoch)
			break;
	list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
		if (req->epoch != expect_epoch)
			break;
		_req_mod(req, BARRIER_ACKED);
	}
	spin_unlock_irq(&connection->resource->req_lock);

	return;

bail:
	spin_unlock_irq(&connection->resource->req_lock);
	conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
}
/**
 * _tl_restart() - Walks the transfer log, and applies an action to all requests
 * @connection:	DRBD connection to operate on.
 * @what:	The action/event to perform with all request objects
 *
 * @what might be one of CONNECTION_LOST_WHILE_PENDING, RESEND, FAIL_FROZEN_DISK_IO,
 * RESTART_FROZEN_DISK_IO.
 */
/* must hold resource->req_lock */
void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	struct drbd_request *req, *r;

	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
		_req_mod(req, what);
}

void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
{
	spin_lock_irq(&connection->resource->req_lock);
	_tl_restart(connection, what);
	spin_unlock_irq(&connection->resource->req_lock);
}

/**
 * tl_clear() - Clears all requests and &struct drbd_tl_epoch objects out of the TL
 * @connection:	DRBD connection.
 *
 * This is called after the connection to the peer was lost. The storage covered
 * by the requests on the transfer log gets marked as out of sync. Called from the
 * receiver thread and the worker thread.
 */
void tl_clear(struct drbd_connection *connection)
{
	tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
}
/**
 * tl_abort_disk_io() - Abort disk I/O for all requests for a certain device in the TL
 * @device:	DRBD device.
 */
void tl_abort_disk_io(struct drbd_device *device)
{
	struct drbd_connection *connection = first_peer_device(device)->connection;
	struct drbd_request *req, *r;

	spin_lock_irq(&connection->resource->req_lock);
	list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
		if (!(req->rq_state & RQ_LOCAL_PENDING))
			continue;
		if (req->device != device)
			continue;
		_req_mod(req, ABORT_DISK_IO);
	}
	spin_unlock_irq(&connection->resource->req_lock);
}
static int drbd_thread_setup(void *arg)
{
	struct drbd_thread *thi = (struct drbd_thread *) arg;
	struct drbd_resource *resource = thi->resource;
	unsigned long flags;
	int retval;

	snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
		 thi->name[0],
		 resource->name);

	allow_kernel_signal(DRBD_SIGKILL);
	allow_kernel_signal(SIGXCPU);
restart:
	retval = thi->function(thi);

	spin_lock_irqsave(&thi->t_lock, flags);

	/* if the receiver has been "EXITING", the last thing it did
	 * was set the conn state to "StandAlone",
	 * if now a re-connect request comes in, conn state goes C_UNCONNECTED,
	 * and receiver thread will be "started".
	 * drbd_thread_start needs to set "RESTARTING" in that case.
	 * t_state check and assignment needs to be within the same spinlock,
	 * so either thread_start sees EXITING, and can remap to RESTARTING,
	 * or thread_start see NONE, and can proceed as normal.
	 */

	if (thi->t_state == RESTARTING) {
		drbd_info(resource, "Restarting %s thread\n", thi->name);
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		goto restart;
	}

	thi->task = NULL;
	thi->t_state = NONE;
	complete_all(&thi->stop);
	spin_unlock_irqrestore(&thi->t_lock, flags);

	drbd_info(resource, "Terminating %s\n", current->comm);

	/* Release mod reference taken when thread was started */

	if (thi->connection)
		kref_put(&thi->connection->kref, drbd_destroy_connection);
	kref_put(&resource->kref, drbd_destroy_resource);
	module_put(THIS_MODULE);
	return retval;
}
static void drbd_thread_init(struct drbd_resource *resource, struct drbd_thread *thi,
			     int (*func) (struct drbd_thread *), const char *name)
{
	spin_lock_init(&thi->t_lock);
	thi->task     = NULL;
	thi->t_state  = NONE;
	thi->function = func;
	thi->resource = resource;
	thi->connection = NULL;
	thi->name     = name;
}
int drbd_thread_start(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *nt;
	unsigned long flags;

	/* is used from state engine doing drbd_thread_stop_nowait,
	 * while holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	switch (thi->t_state) {
	case NONE:
		drbd_info(resource, "Starting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);

		/* Get ref on module for thread - this is released when thread exits */
		if (!try_module_get(THIS_MODULE)) {
			drbd_err(resource, "Failed to get module reference in drbd_thread_start\n");
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return false;
		}

		kref_get(&resource->kref);
		if (thi->connection)
			kref_get(&thi->connection->kref);

		init_completion(&thi->stop);
		thi->reset_cpu_mask = 1;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		flush_signals(current); /* otherw. may get -ERESTARTNOINTR */

		nt = kthread_create(drbd_thread_setup, (void *) thi,
				    "drbd_%c_%s", thi->name[0], thi->resource->name);

		if (IS_ERR(nt)) {
			drbd_err(resource, "Couldn't start thread\n");

			if (thi->connection)
				kref_put(&thi->connection->kref, drbd_destroy_connection);
			kref_put(&resource->kref, drbd_destroy_resource);
			module_put(THIS_MODULE);
			return false;
		}
		spin_lock_irqsave(&thi->t_lock, flags);
		thi->task = nt;
		thi->t_state = RUNNING;
		spin_unlock_irqrestore(&thi->t_lock, flags);
		wake_up_process(nt);
		break;
	case EXITING:
		thi->t_state = RESTARTING;
		drbd_info(resource, "Restarting %s thread (from %s [%d])\n",
			  thi->name, current->comm, current->pid);
		/* fall through */
	case RUNNING:
	case RESTARTING:
	default:
		spin_unlock_irqrestore(&thi->t_lock, flags);
		break;
	}

	return true;
}
void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
{
	unsigned long flags;

	enum drbd_thread_state ns = restart ? RESTARTING : EXITING;

	/* may be called from state engine, holding the req lock irqsave */
	spin_lock_irqsave(&thi->t_lock, flags);

	if (thi->t_state == NONE) {
		spin_unlock_irqrestore(&thi->t_lock, flags);
		if (restart)
			drbd_thread_start(thi);
		return;
	}

	if (thi->t_state != ns) {
		if (thi->task == NULL) {
			spin_unlock_irqrestore(&thi->t_lock, flags);
			return;
		}

		thi->t_state = ns;
		init_completion(&thi->stop);
		if (thi->task != current)
			force_sig(DRBD_SIGKILL, thi->task);
	}

	spin_unlock_irqrestore(&thi->t_lock, flags);

	if (wait)
		wait_for_completion(&thi->stop);
}
int conn_lowest_minor(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr = 0, minor = -1;

	rcu_read_lock();
	peer_device = idr_get_next(&connection->peer_devices, &vnr);
	if (peer_device)
		minor = device_to_minor(peer_device->device);
	rcu_read_unlock();

	return minor;
}
#ifdef CONFIG_SMP
/**
 * drbd_calc_cpu_mask() - Generate CPU masks, spread over all CPUs
 *
 * Forces all threads of a resource onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
static void drbd_calc_cpu_mask(cpumask_var_t *cpu_mask)
{
	unsigned int *resources_per_cpu, min_index = ~0;

	resources_per_cpu = kcalloc(nr_cpu_ids, sizeof(*resources_per_cpu),
				    GFP_KERNEL);
	if (resources_per_cpu) {
		struct drbd_resource *resource;
		unsigned int cpu, min = ~0;

		rcu_read_lock();
		for_each_resource_rcu(resource, &drbd_resources) {
			for_each_cpu(cpu, resource->cpu_mask)
				resources_per_cpu[cpu]++;
		}
		rcu_read_unlock();
		for_each_online_cpu(cpu) {
			if (resources_per_cpu[cpu] < min) {
				min = resources_per_cpu[cpu];
				min_index = cpu;
			}
		}
		kfree(resources_per_cpu);
	}
	if (min_index == ~0) {
		cpumask_setall(*cpu_mask);
		return;
	}
	cpumask_set_cpu(min_index, *cpu_mask);
}

/**
 * drbd_thread_current_set_cpu() - modifies the cpu mask of the _current_ thread
 * @thi:	drbd_thread object
 *
 * call in the "main loop" of _all_ threads, no need for any mutex, current won't die
 * prematurely.
 */
void drbd_thread_current_set_cpu(struct drbd_thread *thi)
{
	struct drbd_resource *resource = thi->resource;
	struct task_struct *p = current;

	if (!thi->reset_cpu_mask)
		return;
	thi->reset_cpu_mask = 0;
	set_cpus_allowed_ptr(p, resource->cpu_mask);
}
#else
#define drbd_calc_cpu_mask(A) ({})
#endif
/**
 * drbd_header_size  -  size of a packet header
 *
 * The header size is a multiple of 8, so any payload following the header is
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
unsigned int drbd_header_size(struct drbd_connection *connection)
{
	if (connection->agreed_pro_version >= 100) {
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
		return sizeof(struct p_header100);
	} else {
		BUILD_BUG_ON(sizeof(struct p_header80) !=
			     sizeof(struct p_header95));
		BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header80), 8));
		return sizeof(struct p_header80);
	}
}
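/* A command buffer in sock->sbuf is thus laid out as
 * [ header of drbd_header_size() bytes ][ command specific payload ]:
 * __conn_prepare_command() below hands out a pointer just past the header,
 * and prepare_header() fills the header in immediately before sending. */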
static unsigned int prepare_header80(struct p_header80 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be16(size);
	return sizeof(struct p_header80);
}

static unsigned int prepare_header95(struct p_header95 *h, enum drbd_packet cmd, int size)
{
	h->magic   = cpu_to_be16(DRBD_MAGIC_BIG);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header95);
}

static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cmd,
				      int size, int vnr)
{
	h->magic   = cpu_to_be32(DRBD_MAGIC_100);
	h->volume  = cpu_to_be16(vnr);
	h->command = cpu_to_be16(cmd);
	h->length  = cpu_to_be32(size);
	return sizeof(struct p_header100);
}

static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
				   void *buffer, enum drbd_packet cmd, int size)
{
	if (connection->agreed_pro_version >= 100)
		return prepare_header100(buffer, cmd, size, vnr);
	else if (connection->agreed_pro_version >= 95 &&
		 size > DRBD_MAX_SIZE_H80_PACKET)
		return prepare_header95(buffer, cmd, size);
	else
		return prepare_header80(buffer, cmd, size);
}
static void *__conn_prepare_command(struct drbd_connection *connection,
				    struct drbd_socket *sock)
{
	if (!sock->socket)
		return NULL;
	return sock->sbuf + drbd_header_size(connection);
}

void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
{
	void *p;

	mutex_lock(&sock->mutex);
	p = __conn_prepare_command(connection, sock);
	if (!p)
		mutex_unlock(&sock->mutex);

	return p;
}

void *drbd_prepare_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock)
{
	return conn_prepare_command(peer_device->connection, sock);
}
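/* conn_prepare_command() leaves sock->mutex held on success; the matching
 * conn_send_command()/drbd_send_command() below (or an explicit
 * mutex_unlock, as in drbd_send_dblock()) releases it again. */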
static int __send_command(struct drbd_connection *connection, int vnr,
			  struct drbd_socket *sock, enum drbd_packet cmd,
			  unsigned int header_size, void *data,
			  unsigned int size)
{
	int msg_flags;
	int err;

	/*
	 * Called with @data == NULL and the size of the data blocks in @size
	 * for commands that send data blocks.  For those commands, omit the
	 * MSG_MORE flag: this will increase the likelihood that data blocks
	 * which are page aligned on the sender will end up page aligned on the
	 * receiver.
	 */
	msg_flags = data ? MSG_MORE : 0;

	header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
				      header_size + size);
	err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
			    msg_flags);
	if (data && !err)
		err = drbd_send_all(connection, sock->socket, data, size, 0);
	/* DRBD protocol "pings" are latency critical.
	 * This is supposed to trigger tcp_push_pending_frames() */
	if (!err && (cmd == P_PING || cmd == P_PING_ACK))
		drbd_tcp_nodelay(sock->socket);

	return err;
}

static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
			       enum drbd_packet cmd, unsigned int header_size,
			       void *data, unsigned int size)
{
	return __send_command(connection, 0, sock, cmd, header_size, data, size);
}

int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __conn_send_command(connection, sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}

int drbd_send_command(struct drbd_peer_device *peer_device, struct drbd_socket *sock,
		      enum drbd_packet cmd, unsigned int header_size,
		      void *data, unsigned int size)
{
	int err;

	err = __send_command(peer_device->connection, peer_device->device->vnr,
			     sock, cmd, header_size, data, size);
	mutex_unlock(&sock->mutex);
	return err;
}
int drbd_send_ping(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
}

int drbd_send_ping_ack(struct drbd_connection *connection)
{
	struct drbd_socket *sock;

	sock = &connection->meta;
	if (!conn_prepare_command(connection, sock))
		return -EIO;
	return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
}
int drbd_send_sync_param(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_rs_param_95 *p;
	int size;
	const int apv = peer_device->connection->agreed_pro_version;
	enum drbd_packet cmd;
	struct net_conf *nc;
	struct disk_conf *dc;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(peer_device->connection->net_conf);

	size = apv <= 87 ? sizeof(struct p_rs_param)
		: apv == 88 ? sizeof(struct p_rs_param)
			+ strlen(nc->verify_alg) + 1
		: apv <= 94 ? sizeof(struct p_rs_param_89)
		: /* apv >= 95 */ sizeof(struct p_rs_param_95);

	cmd = apv >= 89 ? P_SYNC_PARAM89 : P_SYNC_PARAM;

	/* initialize verify_alg and csums_alg */
	memset(p->verify_alg, 0, 2 * SHARED_SECRET_MAX);

	if (get_ldev(peer_device->device)) {
		dc = rcu_dereference(peer_device->device->ldev->disk_conf);
		p->resync_rate = cpu_to_be32(dc->resync_rate);
		p->c_plan_ahead = cpu_to_be32(dc->c_plan_ahead);
		p->c_delay_target = cpu_to_be32(dc->c_delay_target);
		p->c_fill_target = cpu_to_be32(dc->c_fill_target);
		p->c_max_rate = cpu_to_be32(dc->c_max_rate);
		put_ldev(peer_device->device);
	} else {
		p->resync_rate = cpu_to_be32(DRBD_RESYNC_RATE_DEF);
		p->c_plan_ahead = cpu_to_be32(DRBD_C_PLAN_AHEAD_DEF);
		p->c_delay_target = cpu_to_be32(DRBD_C_DELAY_TARGET_DEF);
		p->c_fill_target = cpu_to_be32(DRBD_C_FILL_TARGET_DEF);
		p->c_max_rate = cpu_to_be32(DRBD_C_MAX_RATE_DEF);
	}

	strcpy(p->verify_alg, nc->verify_alg);
	strcpy(p->csums_alg, nc->csums_alg);
	rcu_read_unlock();

	return drbd_send_command(peer_device, sock, cmd, size, NULL, 0);
}
int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_protocol *p;
	struct net_conf *nc;
	int size, cf;

	sock = &connection->data;
	p = __conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;

	rcu_read_lock();
	nc = rcu_dereference(connection->net_conf);

	if (nc->tentative && connection->agreed_pro_version < 92) {
		rcu_read_unlock();
		drbd_err(connection, "--dry-run is not supported by peer");
		return -EOPNOTSUPP;
	}

	size = sizeof(*p);
	if (connection->agreed_pro_version >= 87)
		size += strlen(nc->integrity_alg) + 1;

	p->protocol      = cpu_to_be32(nc->wire_protocol);
	p->after_sb_0p   = cpu_to_be32(nc->after_sb_0p);
	p->after_sb_1p   = cpu_to_be32(nc->after_sb_1p);
	p->after_sb_2p   = cpu_to_be32(nc->after_sb_2p);
	p->two_primaries = cpu_to_be32(nc->two_primaries);
	cf = 0;
	if (nc->discard_my_data)
		cf |= CF_DISCARD_MY_DATA;
	p->conn_flags    = cpu_to_be32(cf);

	if (connection->agreed_pro_version >= 87)
		strcpy(p->integrity_alg, nc->integrity_alg);
	rcu_read_unlock();

	return __conn_send_command(connection, sock, cmd, size, NULL, 0);
}

int drbd_send_protocol(struct drbd_connection *connection)
{
	int err;

	mutex_lock(&connection->data.mutex);
	err = __drbd_send_protocol(connection, P_PROTOCOL);
	mutex_unlock(&connection->data.mutex);

	return err;
}
static int _drbd_send_uuids(struct drbd_peer_device *peer_device, u64 uuid_flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_uuids *p;
	int i;

	if (!get_ldev_if_state(device, D_NEGOTIATING))
		return 0;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p) {
		put_ldev(device);
		return -EIO;
	}
	spin_lock_irq(&device->ldev->md.uuid_lock);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		p->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	device->comm_bm_set = drbd_bm_total_weight(device);
	p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
	rcu_read_lock();
	uuid_flags |= rcu_dereference(peer_device->connection->net_conf)->discard_my_data ? 1 : 0;
	rcu_read_unlock();
	uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
	uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
	p->uuid[UI_FLAGS] = cpu_to_be64(uuid_flags);

	put_ldev(device);
	return drbd_send_command(peer_device, sock, P_UUIDS, sizeof(*p), NULL, 0);
}

int drbd_send_uuids(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 0);
}

int drbd_send_uuids_skip_initial_sync(struct drbd_peer_device *peer_device)
{
	return _drbd_send_uuids(peer_device, 8);
}
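/* UI_FLAGS bits assembled above: 1 = peer should discard my data
 * (net_conf->discard_my_data), 2 = we were a crashed primary,
 * 4 = disk is D_INCONSISTENT, 8 = skip the initial sync
 * (drbd_send_uuids_skip_initial_sync). */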
void drbd_print_uuids(struct drbd_device *device, const char *text)
{
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		u64 *uuid = device->ldev->md.uuid;
		drbd_info(device, "%s %016llX:%016llX:%016llX:%016llX\n",
			  text,
			  (unsigned long long)uuid[UI_CURRENT],
			  (unsigned long long)uuid[UI_BITMAP],
			  (unsigned long long)uuid[UI_HISTORY_START],
			  (unsigned long long)uuid[UI_HISTORY_END]);
		put_ldev(device);
	} else {
		drbd_info(device, "%s effective data uuid: %016llX\n",
			  text,
			  (unsigned long long)device->ed_uuid);
	}
}
void drbd_gen_and_send_sync_uuid(struct drbd_peer_device *peer_device)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_rs_uuid *p;
	u64 uuid;

	D_ASSERT(device, device->state.disk == D_UP_TO_DATE);

	uuid = device->ldev->md.uuid[UI_BITMAP];
	if (uuid && uuid != UUID_JUST_CREATED)
		uuid = uuid + UUID_NEW_BM_OFFSET;
	else
		get_random_bytes(&uuid, sizeof(u64));
	drbd_uuid_set(device, UI_BITMAP, uuid);
	drbd_print_uuids(device, "updated sync UUID");
	drbd_md_sync(device);

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->uuid = cpu_to_be64(uuid);
		drbd_send_command(peer_device, sock, P_SYNC_UUID, sizeof(*p), NULL, 0);
	}
}
/* communicated if (agreed_features & DRBD_FF_WSAME) */
static void
assign_p_sizes_qlim(struct drbd_device *device, struct p_sizes *p,
		    struct request_queue *q)
{
	if (q) {
		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = cpu_to_be32(queue_alignment_offset(q));
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = blk_queue_discard(q);
		p->qlim->write_same_capable = !!q->limits.max_write_same_sectors;
	} else {
		q = device->rq_queue;
		p->qlim->physical_block_size = cpu_to_be32(queue_physical_block_size(q));
		p->qlim->logical_block_size = cpu_to_be32(queue_logical_block_size(q));
		p->qlim->alignment_offset = 0;
		p->qlim->io_min = cpu_to_be32(queue_io_min(q));
		p->qlim->io_opt = cpu_to_be32(queue_io_opt(q));
		p->qlim->discard_enabled = 0;
		p->qlim->write_same_capable = 0;
	}
}
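/* With a local backing device the peer is told the backing queue's limits;
 * without one (q == NULL above, i.e. diskless) the limits of our own request
 * queue are reported instead, with alignment, discard and write-same
 * conservatively cleared. */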
int drbd_send_sizes(struct drbd_peer_device *peer_device, int trigger_reply, enum dds_flags flags)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_sizes *p;
	sector_t d_size, u_size;
	int q_order_type;
	unsigned int max_bio_size;
	unsigned int packet_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;

	packet_size = sizeof(*p);
	if (peer_device->connection->agreed_features & DRBD_FF_WSAME)
		packet_size += sizeof(p->qlim[0]);

	memset(p, 0, packet_size);
	if (get_ldev_if_state(device, D_NEGOTIATING)) {
		struct request_queue *q = bdev_get_queue(device->ldev->backing_bdev);
		d_size = drbd_get_max_capacity(device->ldev);
		rcu_read_lock();
		u_size = rcu_dereference(device->ldev->disk_conf)->disk_size;
		rcu_read_unlock();
		q_order_type = drbd_queue_order_type(device);
		max_bio_size = queue_max_hw_sectors(q) << 9;
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
		assign_p_sizes_qlim(device, p, q);
		put_ldev(device);
	} else {
		d_size = 0;
		u_size = 0;
		q_order_type = QUEUE_ORDERED_NONE;
		max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
		assign_p_sizes_qlim(device, p, NULL);
	}

	if (peer_device->connection->agreed_pro_version <= 94)
		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
	else if (peer_device->connection->agreed_pro_version < 100)
		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);

	p->d_size = cpu_to_be64(d_size);
	p->u_size = cpu_to_be64(u_size);
	p->c_size = cpu_to_be64(trigger_reply ? 0 : drbd_get_capacity(device->this_bdev));
	p->max_bio_size = cpu_to_be32(max_bio_size);
	p->queue_order_type = cpu_to_be16(q_order_type);
	p->dds_flags = cpu_to_be16(flags);

	return drbd_send_command(peer_device, sock, P_SIZES, packet_size, NULL, 0);
}
/**
 * drbd_send_current_state() - Sends the drbd state to the peer
 * @peer_device:	DRBD peer device.
 */
int drbd_send_current_state(struct drbd_peer_device *peer_device)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(peer_device->device->state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
/**
 * drbd_send_state() - After a state change, sends the new state to the peer
 * @peer_device:	DRBD peer device.
 * @state:	the state to send, not necessarily the current state.
 *
 * Each state change queues an "after_state_ch" work, which will eventually
 * send the resulting new state to the peer. If more state changes happen
 * between queuing and processing of the after_state_ch work, we still
 * want to send each intermediary state in the order it occurred.
 */
int drbd_send_state(struct drbd_peer_device *peer_device, union drbd_state state)
{
	struct drbd_socket *sock;
	struct p_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->state = cpu_to_be32(state.i); /* Within the send mutex */
	return drbd_send_command(peer_device, sock, P_STATE, sizeof(*p), NULL, 0);
}
int drbd_send_state_req(struct drbd_peer_device *peer_device, union drbd_state mask, union drbd_state val)
{
	struct drbd_socket *sock;
	struct p_req_state *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return drbd_send_command(peer_device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
}
int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{
	enum drbd_packet cmd;
	struct drbd_socket *sock;
	struct p_req_state *p;

	cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
	sock = &connection->data;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return -EIO;
	p->mask = cpu_to_be32(mask.i);
	p->val = cpu_to_be32(val.i);
	return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
}
void drbd_send_sr_reply(struct drbd_peer_device *peer_device, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		drbd_send_command(peer_device, sock, P_STATE_CHG_REPLY, sizeof(*p), NULL, 0);
	}
}
void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{
	struct drbd_socket *sock;
	struct p_req_state_reply *p;
	enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (p) {
		p->retcode = cpu_to_be32(retcode);
		conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
	}
}
static void dcbp_set_code(struct p_compressed_bm *p, enum drbd_bitmap_code code)
{
	BUG_ON(code & ~0xf);
	p->encoding = (p->encoding & ~0xf) | code;
}

static void dcbp_set_start(struct p_compressed_bm *p, int set)
{
	p->encoding = (p->encoding & ~0x80) | (set ? 0x80 : 0);
}

static void dcbp_set_pad_bits(struct p_compressed_bm *p, int n)
{
	p->encoding = (p->encoding & (~0x7 << 4)) | (n << 4);
}
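/* Resulting layout of p->encoding: bit 7 = value of the first run
 * (dcbp_set_start), bits 6..4 = number of pad bits in the last code byte
 * (dcbp_set_pad_bits), bits 3..0 = bitmap encoding code (dcbp_set_code). */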
static int fill_bitmap_rle_bits(struct drbd_device *device,
				struct p_compressed_bm *p,
				unsigned int size,
				struct bm_xfer_ctx *c)
{
	struct bitstream bs;
	unsigned long plain_bits;
	unsigned long tmp;
	unsigned long rl;
	unsigned len;
	unsigned toggle;
	int bits, use_rle;

	/* may we use this feature? */
	rcu_read_lock();
	use_rle = rcu_dereference(first_peer_device(device)->connection->net_conf)->use_rle;
	rcu_read_unlock();
	if (!use_rle || first_peer_device(device)->connection->agreed_pro_version < 90)
		return 0;

	if (c->bit_offset >= c->bm_bits)
		return 0; /* nothing to do. */

	/* use at most thus many bytes */
	bitstream_init(&bs, p->code, size, 0);
	memset(p->code, 0, size);
	/* plain bits covered in this code string */
	plain_bits = 0;

	/* p->encoding & 0x80 stores whether the first run length is set.
	 * bit offset is implicit.
	 * start with toggle == 2 to be able to tell the first iteration */
	toggle = 2;

	/* see how much plain bits we can stuff into one packet
	 * using RLE and VLI. */
	do {
		tmp = (toggle == 0) ? _drbd_bm_find_next_zero(device, c->bit_offset)
				    : _drbd_bm_find_next(device, c->bit_offset);
		if (tmp == -1UL)
			tmp = c->bm_bits;
		rl = tmp - c->bit_offset;

		if (toggle == 2) { /* first iteration */
			if (rl == 0) {
				/* the first checked bit was set,
				 * store start value, */
				dcbp_set_start(p, 1);
				/* but skip encoding of zero run length */
				toggle = !toggle;
				continue;
			}
			dcbp_set_start(p, 0);
		}

		/* paranoia: catch zero runlength.
		 * can only happen if bitmap is modified while we scan it. */
		if (rl == 0) {
			drbd_err(device, "unexpected zero runlength while encoding bitmap "
				 "t:%u bo:%lu\n", toggle, c->bit_offset);
			return -1;
		}

		bits = vli_encode_bits(&bs, rl);
		if (bits == -ENOBUFS) /* buffer full */
			break;
		if (bits <= 0) {
			drbd_err(device, "error while encoding bitmap: %d\n", bits);
			return 0;
		}

		toggle = !toggle;
		plain_bits += rl;
		c->bit_offset = tmp;
	} while (c->bit_offset < c->bm_bits);

	len = bs.cur.b - p->code + !!bs.cur.bit;

	if (plain_bits < (len << 3)) {
		/* incompressible with this method.
		 * we need to rewind both word and bit position. */
		c->bit_offset -= plain_bits;
		bm_xfer_ctx_bit_to_word_offset(c);
		c->bit_offset = c->word_offset * BITS_PER_LONG;
		return 0;
	}

	/* RLE + VLI was able to compress it just fine.
	 * update c->word_offset. */
	bm_xfer_ctx_bit_to_word_offset(c);

	/* store pad_bits */
	dcbp_set_pad_bits(p, (8 - bs.cur.bit) & 0x7);

	return len;
}
/*
 * send_bitmap_rle_or_plain
 *
 * Return 0 when done, 1 when another iteration is needed, and a negative error
 * code upon failure.
 */
static int
send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	unsigned int header_size = drbd_header_size(first_peer_device(device)->connection);
	struct p_compressed_bm *p = sock->sbuf + header_size;
	int len, err;

	len = fill_bitmap_rle_bits(device, p,
			DRBD_SOCKET_BUFFER_SIZE - header_size - sizeof(*p), c);
	if (len < 0)
		return -EIO;

	if (len) {
		dcbp_set_code(p, RLE_VLI_Bits);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock,
				     P_COMPRESSED_BITMAP, sizeof(*p) + len,
				     NULL, 0);
		c->bytes[0] += header_size + sizeof(*p) + len;

		if (c->bit_offset >= c->bm_bits)
			len = 0; /* DONE */
	} else {
		/* was not compressible.
		 * send a buffer full of plain text bits instead. */
		unsigned int data_size;
		unsigned long num_words;
		unsigned long *p = sock->sbuf + header_size;

		data_size = DRBD_SOCKET_BUFFER_SIZE - header_size;
		num_words = min_t(size_t, data_size / sizeof(*p),
				  c->bm_words - c->word_offset);
		len = num_words * sizeof(*p);
		if (len)
			drbd_bm_get_lel(device, c->word_offset, num_words, p);
		err = __send_command(first_peer_device(device)->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
		c->word_offset += num_words;
		c->bit_offset = c->word_offset * BITS_PER_LONG;

		c->bytes[1] += header_size + len;

		if (c->bit_offset > c->bm_bits)
			c->bit_offset = c->bm_bits;
	}
	if (!err) {
		if (len == 0) {
			INFO_bm_xfer_stats(device, "send", c);
			return 0;
		} else
			return 1;
	}
	return -EIO;
}
/* See the comment at receive_bitmap() */
static int _drbd_send_bitmap(struct drbd_device *device)
{
	struct bm_xfer_ctx c;
	int err;

	if (!expect(device->bitmap))
		return false;

	if (get_ldev(device)) {
		if (drbd_md_test_flag(device->ldev, MDF_FULL_SYNC)) {
			drbd_info(device, "Writing the whole bitmap, MDF_FullSync was set.\n");
			drbd_bm_set_all(device);
			if (drbd_bm_write(device)) {
				/* write_bm did fail! Leave full sync flag set in Meta P_DATA
				 * but otherwise process as per normal - need to tell other
				 * side that a full resync is required! */
				drbd_err(device, "Failed to write bitmap to disk!\n");
			} else {
				drbd_md_clear_flag(device, MDF_FULL_SYNC);
				drbd_md_sync(device);
			}
		}
		put_ldev(device);
	}

	c = (struct bm_xfer_ctx) {
		.bm_bits = drbd_bm_bits(device),
		.bm_words = drbd_bm_words(device),
	};

	do {
		err = send_bitmap_rle_or_plain(device, &c);
	} while (err > 0);

	return err == 0;
}
int drbd_send_bitmap(struct drbd_device *device)
{
	struct drbd_socket *sock = &first_peer_device(device)->connection->data;
	int err = -1;

	mutex_lock(&sock->mutex);
	if (sock->socket)
		err = !_drbd_send_bitmap(device);
	mutex_unlock(&sock->mutex);
	return err;
}
void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{
	struct drbd_socket *sock;
	struct p_barrier_ack *p;

	if (connection->cstate < C_WF_REPORT_PARAMS)
		return;

	sock = &connection->meta;
	p = conn_prepare_command(connection, sock);
	if (!p)
		return;
	p->barrier = barrier_nr;
	p->set_size = cpu_to_be32(set_size);
	conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
}
/**
 * _drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device.
 * @cmd:	Packet command code.
 * @sector:	sector, needs to be in big endian byte order
 * @blksize:	size in byte, needs to be in big endian byte order
 * @block_id:	Id, big endian byte order
 */
static int _drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
			  u64 sector, u32 blksize, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_ack *p;

	if (peer_device->device->state.conn < C_CONNECTED)
		return -EIO;

	sock = &peer_device->connection->meta;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = sector;
	p->block_id = block_id;
	p->blksize = blksize;
	p->seq_num = cpu_to_be32(atomic_inc_return(&peer_device->device->packet_seq));
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
/* dp->sector and dp->block_id already/still in network byte order,
 * data_size is payload size according to dp->head,
 * and may need to be corrected for digest size. */
void drbd_send_ack_dp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_data *dp, int data_size)
{
	if (peer_device->connection->peer_integrity_tfm)
		data_size -= crypto_ahash_digestsize(peer_device->connection->peer_integrity_tfm);
	_drbd_send_ack(peer_device, cmd, dp->sector, cpu_to_be32(data_size),
		       dp->block_id);
}

void drbd_send_ack_rp(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		      struct p_block_req *rp)
{
	_drbd_send_ack(peer_device, cmd, rp->sector, rp->blksize, rp->block_id);
}
/**
 * drbd_send_ack() - Sends an ack packet
 * @peer_device:	DRBD peer device
 * @cmd:	packet command code
 * @peer_req:	peer request
 */
int drbd_send_ack(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		  struct drbd_peer_request *peer_req)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(peer_req->i.sector),
			      cpu_to_be32(peer_req->i.size),
			      peer_req->block_id);
}

/* This function misuses the block_id field to signal if the blocks
 * are in sync or not. */
int drbd_send_ack_ex(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		     sector_t sector, int blksize, u64 block_id)
{
	return _drbd_send_ack(peer_device, cmd,
			      cpu_to_be64(sector),
			      cpu_to_be32(blksize),
			      cpu_to_be64(block_id));
}
int drbd_send_rs_deallocated(struct drbd_peer_device *peer_device,
			     struct drbd_peer_request *peer_req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->blksize = cpu_to_be32(peer_req->i.size);
	return drbd_send_command(peer_device, sock, P_RS_DEALLOCATED, sizeof(*p), NULL, 0);
}
int drbd_send_drequest(struct drbd_peer_device *peer_device, int cmd,
		       sector_t sector, int size, u64 block_id)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = block_id;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), NULL, 0);
}
int drbd_send_drequest_csum(struct drbd_peer_device *peer_device, sector_t sector, int size,
			    void *digest, int digest_size, enum drbd_packet cmd)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	/* FIXME: Put the digest into the preallocated socket buffer.  */

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, cmd, sizeof(*p), digest, digest_size);
}
int drbd_send_ov_request(struct drbd_peer_device *peer_device, sector_t sector, int size)
{
	struct drbd_socket *sock;
	struct p_block_req *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(sector);
	p->block_id = ID_SYNCER /* unused */;
	p->blksize = cpu_to_be32(size);
	return drbd_send_command(peer_device, sock, P_OV_REQUEST, sizeof(*p), NULL, 0);
}
/* called on sndtimeo
 * returns false if we should retry,
 * true if we think connection is dead
 */
static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{
	int drop_it;
	/* long elapsed = (long)(jiffies - device->last_received); */

	drop_it =   connection->meta.socket == sock
		|| !connection->ack_receiver.task
		|| get_t_state(&connection->ack_receiver) != RUNNING
		|| connection->cstate < C_WF_REPORT_PARAMS;

	if (drop_it)
		return true;

	drop_it = !--connection->ko_count;
	if (!drop_it) {
		drbd_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
			 current->comm, current->pid, connection->ko_count);
		request_ping(connection);
	}

	return drop_it; /* && (device->state == R_PRIMARY) */;
}
static void drbd_update_congested(struct drbd_connection *connection)
{
	struct sock *sk = connection->data.socket->sk;
	if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
		set_bit(NET_CONGESTED, &connection->flags);
}
/* The idea of sendpage seems to be to put some kind of reference
 * to the page into the skb, and to hand it over to the NIC. In
 * this process get_page() gets called.
 *
 * As soon as the page was really sent over the network put_page()
 * gets called by some part of the network layer. [ NIC driver? ]
 *
 * [ get_page() / put_page() increment/decrement the count. If count
 *   reaches 0 the page will be freed. ]
 *
 * This works nicely with pages from FSs.
 * But this means that in protocol A we might signal IO completion too early!
 *
 * In order not to corrupt data during a resync we must make sure
 * that we do not reuse our own buffer pages (EEs) too early, therefore
 * we have the net_ee list.
 *
 * XFS seems to have problems, still, it submits pages with page_count == 0!
 * As a workaround, we disable sendpage on pages
 * with page_count == 0 or PageSlab.
 */
static int _drbd_no_send_page(struct drbd_peer_device *peer_device, struct page *page,
			      int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket;
	void *addr;
	int err;

	socket = peer_device->connection->data.socket;
	addr = kmap(page) + offset;
	err = drbd_send_all(peer_device->connection, socket, addr, size, msg_flags);
	kunmap(page);
	if (!err)
		peer_device->device->send_cnt += size >> 9;
	return err;
}
static int _drbd_send_page(struct drbd_peer_device *peer_device, struct page *page,
			   int offset, size_t size, unsigned msg_flags)
{
	struct socket *socket = peer_device->connection->data.socket;
	int len = size;
	int err = -EIO;

	/* e.g. XFS meta- & log-data is in slab pages, which have a
	 * page_count of 0 and/or have PageSlab() set.
	 * we cannot use send_page for those, as that does get_page();
	 * put_page(); and would cause either a VM_BUG directly, or
	 * __page_cache_release a page that would actually still be referenced
	 * by someone, leading to some obscure delayed Oops somewhere else. */
	if (drbd_disable_sendpage || (page_count(page) < 1) || PageSlab(page))
		return _drbd_no_send_page(peer_device, page, offset, size, msg_flags);

	msg_flags |= MSG_NOSIGNAL;
	drbd_update_congested(peer_device->connection);
	do {
		int sent;

		sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
		if (sent <= 0) {
			if (sent == -EAGAIN) {
				if (we_should_drop_the_connection(peer_device->connection, socket))
					break;
				continue;
			}
			drbd_warn(peer_device->device, "%s: size=%d len=%d sent=%d\n",
				  __func__, (int)size, len, sent);
			if (sent < 0)
				err = sent;
			break;
		}
		len    -= sent;
		offset += sent;
	} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
	clear_bit(NET_CONGESTED, &peer_device->connection->flags);

	if (len == 0) {
		err = 0;
		peer_device->device->send_cnt += size >> 9;
	}
	return err;
}
static int _drbd_send_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_no_send_page(peer_device, bvec.bv_page,
					 bvec.bv_offset, bvec.bv_len,
					 bio_iter_last(bvec, iter)
					 ? 0 : MSG_MORE);
		if (err)
			return err;
		/* REQ_OP_WRITE_SAME has only one segment */
		if (bio_op(bio) == REQ_OP_WRITE_SAME)
			break;
	}
	return 0;
}

static int _drbd_send_zc_bio(struct drbd_peer_device *peer_device, struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	/* hint all but last page with MSG_MORE */
	bio_for_each_segment(bvec, bio, iter) {
		int err;

		err = _drbd_send_page(peer_device, bvec.bv_page,
				      bvec.bv_offset, bvec.bv_len,
				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
		if (err)
			return err;
		/* REQ_OP_WRITE_SAME has only one segment */
		if (bio_op(bio) == REQ_OP_WRITE_SAME)
			break;
	}
	return 0;
}
static int _drbd_send_zc_ee(struct drbd_peer_device *peer_device,
			    struct drbd_peer_request *peer_req)
{
	struct page *page = peer_req->pages;
	unsigned len = peer_req->i.size;
	int err;

	/* hint all but last page with MSG_MORE */
	page_chain_for_each(page) {
		unsigned l = min_t(unsigned, len, PAGE_SIZE);

		err = _drbd_send_page(peer_device, page, 0, l,
				      page_chain_next(page) ? MSG_MORE : 0);
		if (err)
			return err;
		len -= l;
	}
	return 0;
}
static u32 bio_flags_to_wire(struct drbd_connection *connection,
			     struct bio *bio)
{
	if (connection->agreed_pro_version >= 95)
		return  (bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0) |
			(bio->bi_opf & REQ_FUA ? DP_FUA : 0) |
			(bio->bi_opf & REQ_PREFLUSH ? DP_FLUSH : 0) |
			(bio_op(bio) == REQ_OP_WRITE_SAME ? DP_WSAME : 0) |
			(bio_op(bio) == REQ_OP_DISCARD ? DP_DISCARD : 0) |
			(bio_op(bio) == REQ_OP_WRITE_ZEROES ? DP_DISCARD : 0);
	else
		return bio->bi_opf & REQ_SYNC ? DP_RW_SYNC : 0;
}
/* Used to send write or TRIM aka REQ_DISCARD requests
 * R_PRIMARY -> Peer	(P_DATA, P_TRIM)
 */
int drbd_send_dblock(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	struct p_wsame *wsame = NULL;
	void *digest_out;
	unsigned int dp_flags = 0;
	int digest_size;
	int err;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->block_id = (unsigned long)req;
	p->seq_num = cpu_to_be32(atomic_inc_return(&device->packet_seq));
	dp_flags = bio_flags_to_wire(peer_device->connection, req->master_bio);
	if (device->state.conn >= C_SYNC_SOURCE &&
	    device->state.conn <= C_PAUSED_SYNC_T)
		dp_flags |= DP_MAY_SET_IN_SYNC;
	if (peer_device->connection->agreed_pro_version >= 100) {
		if (req->rq_state & RQ_EXP_RECEIVE_ACK)
			dp_flags |= DP_SEND_RECEIVE_ACK;
		/* During resync, request an explicit write ack,
		 * even in protocol != C */
		if (req->rq_state & RQ_EXP_WRITE_ACK
		|| (dp_flags & DP_MAY_SET_IN_SYNC))
			dp_flags |= DP_SEND_WRITE_ACK;
	}
	p->dp_flags = cpu_to_be32(dp_flags);

	if (dp_flags & DP_DISCARD) {
		struct p_trim *t = (struct p_trim *)p;
		t->size = cpu_to_be32(req->i.size);
		err = __send_command(peer_device->connection, device->vnr, sock, P_TRIM, sizeof(*t), NULL, 0);
		goto out;
	}
	if (dp_flags & DP_WSAME) {
		/* this will only work if DRBD_FF_WSAME is set AND the
		 * handshake agreed that all nodes and backend devices are
		 * WRITE_SAME capable and agree on logical_block_size */
		wsame = (struct p_wsame *)p;
		digest_out = wsame + 1;
		wsame->size = cpu_to_be32(req->i.size);
	} else
		digest_out = p + 1;

	/* our digest is still only over the payload.
	 * TRIM does not carry any payload. */
	if (digest_size)
		drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest_out);
	if (wsame)
		err = __send_command(peer_device->connection, device->vnr, sock, P_WSAME,
				     sizeof(*wsame) + digest_size, NULL,
				     bio_iovec(req->master_bio).bv_len);
	else
		err = __send_command(peer_device->connection, device->vnr, sock, P_DATA,
				     sizeof(*p) + digest_size, NULL, req->i.size);
	if (!err) {
		/* For protocol A, we have to memcpy the payload into
		 * socket buffers, as we may complete right away
		 * as soon as we handed it over to tcp, at which point the data
		 * pages may become invalid.
		 *
		 * For data-integrity enabled, we copy it as well, so we can be
		 * sure that even if the bio pages may still be modified, it
		 * won't change the data on the wire, thus if the digest checks
		 * out ok after sending on this side, but does not fit on the
		 * receiving side, we sure have detected corruption elsewhere.
		 */
		if (!(req->rq_state & (RQ_EXP_RECEIVE_ACK | RQ_EXP_WRITE_ACK)) || digest_size)
			err = _drbd_send_bio(peer_device, req->master_bio);
		else
			err = _drbd_send_zc_bio(peer_device, req->master_bio);

		/* double check digest, sometimes buffers have been modified in flight. */
		if (digest_size > 0 && digest_size <= 64) {
			/* 64 byte, 512 bit, is the largest digest size
			 * currently supported in kernel crypto. */
			unsigned char digest[64];
			drbd_csum_bio(peer_device->connection->integrity_tfm, req->master_bio, digest);
			if (memcmp(p + 1, digest, digest_size)) {
				drbd_warn(device,
					"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
					(unsigned long long)req->i.sector, req->i.size);
			}
		} /* else if (digest_size > 64) {
		     ... Be noisy about digest too large ...
		} */
	}
out:
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}
/* answer packet, used to send data back for read requests:
 *  Peer          -> (diskless) R_PRIMARY  (P_DATA_REPLY)
 *  C_SYNC_SOURCE -> C_SYNC_TARGET         (P_RS_DATA_REPLY)
 */
int drbd_send_block(struct drbd_peer_device *peer_device, enum drbd_packet cmd,
		    struct drbd_peer_request *peer_req)
{
	struct drbd_device *device = peer_device->device;
	struct drbd_socket *sock;
	struct p_data *p;
	int err;
	int digest_size;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);

	digest_size = peer_device->connection->integrity_tfm ?
		      crypto_ahash_digestsize(peer_device->connection->integrity_tfm) : 0;

	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(peer_req->i.sector);
	p->block_id = peer_req->block_id;
	p->seq_num = 0;  /* unused */
	if (digest_size)
		drbd_csum_ee(peer_device->connection->integrity_tfm, peer_req, p + 1);
	err = __send_command(peer_device->connection, device->vnr, sock, cmd, sizeof(*p) + digest_size, NULL, peer_req->i.size);
	if (!err)
		err = _drbd_send_zc_ee(peer_device, peer_req);
	mutex_unlock(&sock->mutex);  /* locked by drbd_prepare_command() */

	return err;
}
int drbd_send_out_of_sync(struct drbd_peer_device *peer_device, struct drbd_request *req)
{
	struct drbd_socket *sock;
	struct p_block_desc *p;

	sock = &peer_device->connection->data;
	p = drbd_prepare_command(peer_device, sock);
	if (!p)
		return -EIO;
	p->sector = cpu_to_be64(req->i.sector);
	p->blksize = cpu_to_be32(req->i.size);
	return drbd_send_command(peer_device, sock, P_OUT_OF_SYNC, sizeof(*p), NULL, 0);
}
/*
  drbd_send distinguishes two cases:

  Packets sent via the data socket "sock"
  and packets sent via the meta data socket "msock"

		    sock                      msock
  -----------------+-------------------------+------------------------------
  timeout           conf.timeout / 2          conf.timeout / 2
  timeout action    send a ping via msock     Abort communication
					      and close all sockets
*/

/*
 * you must have down()ed the appropriate [m]sock_mutex elsewhere!
 */
int drbd_send(struct drbd_connection *connection, struct socket *sock,
	      void *buf, size_t size, unsigned msg_flags)
{
	struct kvec iov = {.iov_base = buf, .iov_len = size};
	struct msghdr msg = {.msg_flags = msg_flags | MSG_NOSIGNAL};
	int rv, sent = 0;

	/* THINK  if (signal_pending) return ... ? */

	iov_iter_kvec(&msg.msg_iter, WRITE | ITER_KVEC, &iov, 1, size);

	if (sock == connection->data.socket) {
		rcu_read_lock();
		connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
		rcu_read_unlock();
		drbd_update_congested(connection);
	}
	do {
		rv = sock_sendmsg(sock, &msg);
		if (rv == -EAGAIN) {
			if (we_should_drop_the_connection(connection, sock))
				break;
			else
				continue;
		}
		if (rv == -EINTR) {
			flush_signals(current);
			rv = 0;
		}
		if (rv < 0)
			break;
		sent += rv;
	} while (sent < size);

	if (sock == connection->data.socket)
		clear_bit(NET_CONGESTED, &connection->flags);

	if (rv <= 0) {
		if (rv != -EAGAIN) {
			drbd_err(connection, "%s_sendmsg returned %d\n",
				 sock == connection->meta.socket ? "msock" : "sock",
				 rv);
			conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
		} else
			conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
	}

	return sent;
}
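/* Partial sends are retried in the loop above until the whole buffer went
 * out; -EAGAIN only aborts once we_should_drop_the_connection() decides the
 * peer is presumed dead, and -EINTR merely flushes signals and retries. */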
/**
 * drbd_send_all  -  Send an entire buffer
 *
 * Returns 0 upon success and a negative error value otherwise.
 */
int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
		  size_t size, unsigned msg_flags)
{
	int err;

	err = drbd_send(connection, sock, buffer, size, msg_flags);
	if (err < 0)
		return err;
	if (err != size)
		return -EIO;
	return 0;
}
static int drbd_open(struct block_device *bdev, fmode_t mode)
{
	struct drbd_device *device = bdev->bd_disk->private_data;
	unsigned long flags;
	int rv = 0;

	mutex_lock(&drbd_main_mutex);
	spin_lock_irqsave(&device->resource->req_lock, flags);
	/* to have a stable device->state.role
	 * and no race with updating open_cnt */

	if (device->state.role != R_PRIMARY) {
		if (mode & FMODE_WRITE)
			rv = -EROFS;
		else if (!drbd_allow_oos)
			rv = -EMEDIUMTYPE;
	}

	if (!rv)
		device->open_cnt++;
	spin_unlock_irqrestore(&device->resource->req_lock, flags);
	mutex_unlock(&drbd_main_mutex);

	return rv;
}
static void drbd_release(struct gendisk *gd, fmode_t mode)
{
	struct drbd_device *device = gd->private_data;
	mutex_lock(&drbd_main_mutex);
	device->open_cnt--;
	mutex_unlock(&drbd_main_mutex);
}
/* need to hold resource->req_lock */
void drbd_queue_unplug(struct drbd_device *device)
{
	if (device->state.pdsk >= D_INCONSISTENT && device->state.conn >= C_CONNECTED) {
		D_ASSERT(device, device->state.role == R_PRIMARY);
		if (test_and_clear_bit(UNPLUG_REMOTE, &device->flags)) {
			drbd_queue_work_if_unqueued(
				&first_peer_device(device)->connection->sender_work,
				&device->unplug_work);
		}
	}
}
static void drbd_set_defaults(struct drbd_device *device)
{
	/* Beware! The actual layout differs
	 * between big endian and little endian */
	device->state = (union drbd_dev_state) {
		{ .role = R_SECONDARY,
		  .conn = C_STANDALONE,
		} };
}
void drbd_init_set_defaults(struct drbd_device *device)
{
	/* the memset(,0,) did most of this.
	 * note: only assignments, no allocation in here */

	drbd_set_defaults(device);

	atomic_set(&device->ap_bio_cnt, 0);
	atomic_set(&device->ap_actlog_cnt, 0);
	atomic_set(&device->ap_pending_cnt, 0);
	atomic_set(&device->rs_pending_cnt, 0);
	atomic_set(&device->unacked_cnt, 0);
	atomic_set(&device->local_cnt, 0);
	atomic_set(&device->pp_in_use_by_net, 0);
	atomic_set(&device->rs_sect_in, 0);
	atomic_set(&device->rs_sect_ev, 0);
	atomic_set(&device->ap_in_flight, 0);
	atomic_set(&device->md_io.in_use, 0);

	mutex_init(&device->own_state_mutex);
	device->state_mutex = &device->own_state_mutex;

	spin_lock_init(&device->al_lock);
	spin_lock_init(&device->peer_seq_lock);

	INIT_LIST_HEAD(&device->active_ee);
	INIT_LIST_HEAD(&device->sync_ee);
	INIT_LIST_HEAD(&device->done_ee);
	INIT_LIST_HEAD(&device->read_ee);
	INIT_LIST_HEAD(&device->net_ee);
	INIT_LIST_HEAD(&device->resync_reads);
	INIT_LIST_HEAD(&device->resync_work.list);
	INIT_LIST_HEAD(&device->unplug_work.list);
	INIT_LIST_HEAD(&device->bm_io_work.w.list);
	INIT_LIST_HEAD(&device->pending_master_completion[0]);
	INIT_LIST_HEAD(&device->pending_master_completion[1]);
	INIT_LIST_HEAD(&device->pending_completion[0]);
	INIT_LIST_HEAD(&device->pending_completion[1]);

	device->resync_work.cb  = w_resync_timer;
	device->unplug_work.cb  = w_send_write_hint;
	device->bm_io_work.w.cb = w_bitmap_io;

	timer_setup(&device->resync_timer, resync_timer_fn, 0);
	timer_setup(&device->md_sync_timer, md_sync_timer_fn, 0);
	timer_setup(&device->start_resync_timer, start_resync_timer_fn, 0);
	timer_setup(&device->request_timer, request_timer_fn, 0);

	init_waitqueue_head(&device->misc_wait);
	init_waitqueue_head(&device->state_wait);
	init_waitqueue_head(&device->ee_wait);
	init_waitqueue_head(&device->al_wait);
	init_waitqueue_head(&device->seq_wait);

	device->resync_wenr = LC_FREE;
	device->peer_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
	device->local_max_bio_size = DRBD_MAX_BIO_SIZE_SAFE;
}
void drbd_device_cleanup(struct drbd_device *device)
{
	int i;
	if (first_peer_device(device)->connection->receiver.t_state != NONE)
		drbd_err(device, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
				first_peer_device(device)->connection->receiver.t_state);

	device->al_writ_cnt  =
	device->bm_writ_cnt  =
	device->read_cnt     =
	device->recv_cnt     =
	device->send_cnt     =
	device->writ_cnt     =
	device->p_size       =
	device->rs_start     =
	device->rs_total     =
	device->rs_failed    = 0;
	device->rs_last_events = 0;
	device->rs_last_sect_ev = 0;
	for (i = 0; i < DRBD_SYNC_MARKS; i++) {
		device->rs_mark_left[i] = 0;
		device->rs_mark_time[i] = 0;
	}
	D_ASSERT(device, first_peer_device(device)->connection->net_conf == NULL);

	drbd_set_my_capacity(device, 0);
	if (device->bitmap) {
		/* maybe never allocated. */
		drbd_bm_resize(device, 0, 1);
		drbd_bm_cleanup(device);
	}

	drbd_backing_dev_free(device, device->ldev);
	device->ldev = NULL;

	clear_bit(AL_SUSPENDED, &device->flags);

	D_ASSERT(device, list_empty(&device->active_ee));
	D_ASSERT(device, list_empty(&device->sync_ee));
	D_ASSERT(device, list_empty(&device->done_ee));
	D_ASSERT(device, list_empty(&device->read_ee));
	D_ASSERT(device, list_empty(&device->net_ee));
	D_ASSERT(device, list_empty(&device->resync_reads));
	D_ASSERT(device, list_empty(&first_peer_device(device)->connection->sender_work.q));
	D_ASSERT(device, list_empty(&device->resync_work.list));
	D_ASSERT(device, list_empty(&device->unplug_work.list));

	drbd_set_defaults(device);
}
static void drbd_destroy_mempools(void)
{
	struct page *page;

	while (drbd_pp_pool) {
		page = drbd_pp_pool;
		drbd_pp_pool = (struct page *)page_private(page);
		__free_page(page);
		drbd_pp_vacant--;
	}

	/* D_ASSERT(device, atomic_read(&drbd_pp_vacant)==0); */

	bioset_exit(&drbd_io_bio_set);
	bioset_exit(&drbd_md_io_bio_set);
	mempool_exit(&drbd_md_io_page_pool);
	mempool_exit(&drbd_ee_mempool);
	mempool_exit(&drbd_request_mempool);
	kmem_cache_destroy(drbd_ee_cache);
	kmem_cache_destroy(drbd_request_cache);
	kmem_cache_destroy(drbd_bm_ext_cache);
	kmem_cache_destroy(drbd_al_ext_cache);

	drbd_ee_cache        = NULL;
	drbd_request_cache   = NULL;
	drbd_bm_ext_cache    = NULL;
	drbd_al_ext_cache    = NULL;
}
static int drbd_create_mempools(void)
{
	struct page *page;
	const int number = (DRBD_MAX_BIO_SIZE/PAGE_SIZE) * drbd_minor_count;
	int i, ret;

	/* caches */
	drbd_request_cache = kmem_cache_create(
		"drbd_req", sizeof(struct drbd_request), 0, 0, NULL);
	if (drbd_request_cache == NULL)
		goto Enomem;

	drbd_ee_cache = kmem_cache_create(
		"drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
	if (drbd_ee_cache == NULL)
		goto Enomem;

	drbd_bm_ext_cache = kmem_cache_create(
		"drbd_bm", sizeof(struct bm_extent), 0, 0, NULL);
	if (drbd_bm_ext_cache == NULL)
		goto Enomem;

	drbd_al_ext_cache = kmem_cache_create(
		"drbd_al", sizeof(struct lc_element), 0, 0, NULL);
	if (drbd_al_ext_cache == NULL)
		goto Enomem;

	/* mempools */
	ret = bioset_init(&drbd_io_bio_set, BIO_POOL_SIZE, 0, 0);
	if (ret)
		goto Enomem;

	ret = bioset_init(&drbd_md_io_bio_set, DRBD_MIN_POOL_PAGES, 0,
			  BIOSET_NEED_BVECS);
	if (ret)
		goto Enomem;

	ret = mempool_init_page_pool(&drbd_md_io_page_pool, DRBD_MIN_POOL_PAGES, 0);
	if (ret)
		goto Enomem;

	ret = mempool_init_slab_pool(&drbd_request_mempool, number,
				     drbd_request_cache);
	if (ret)
		goto Enomem;

	ret = mempool_init_slab_pool(&drbd_ee_mempool, number, drbd_ee_cache);
	if (ret)
		goto Enomem;

	/* drbd's page pool */
	spin_lock_init(&drbd_pp_lock);

	for (i = 0; i < number; i++) {
		page = alloc_page(GFP_HIGHUSER);
		if (!page)
			goto Enomem;
		set_page_private(page, (unsigned long)drbd_pp_pool);
		drbd_pp_pool = page;
	}
	drbd_pp_vacant = number;

	return 0;

Enomem:
	drbd_destroy_mempools(); /* in case we allocated some */
	return -ENOMEM;
}
static void drbd_release_all_peer_reqs(struct drbd_device *device)
{
	int rr;

	rr = drbd_free_peer_reqs(device, &device->active_ee);
	if (rr)
		drbd_err(device, "%d EEs in active list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->sync_ee);
	if (rr)
		drbd_err(device, "%d EEs in sync list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->read_ee);
	if (rr)
		drbd_err(device, "%d EEs in read list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->done_ee);
	if (rr)
		drbd_err(device, "%d EEs in done list found!\n", rr);

	rr = drbd_free_peer_reqs(device, &device->net_ee);
	if (rr)
		drbd_err(device, "%d EEs in net list found!\n", rr);
}
/* caution. no locking. */
void drbd_destroy_device(struct kref *kref)
{
	struct drbd_device *device = container_of(kref, struct drbd_device, kref);
	struct drbd_resource *resource = device->resource;
	struct drbd_peer_device *peer_device, *tmp_peer_device;

	del_timer_sync(&device->request_timer);

	/* paranoia asserts */
	D_ASSERT(device, device->open_cnt == 0);
	/* end paranoia asserts */

	/* cleanup stuff that may have been allocated during
	 * device (re-)configuration or state changes */

	if (device->this_bdev)
		bdput(device->this_bdev);

	drbd_backing_dev_free(device, device->ldev);
	device->ldev = NULL;

	drbd_release_all_peer_reqs(device);

	lc_destroy(device->act_log);
	lc_destroy(device->resync);

	kfree(device->p_uuid);
	/* device->p_uuid = NULL; */

	if (device->bitmap) /* should no longer be there. */
		drbd_bm_cleanup(device);
	__free_page(device->md_io.page);
	put_disk(device->vdisk);
	blk_cleanup_queue(device->rq_queue);
	kfree(device->rs_plan_s);

	/* not for_each_connection(connection, resource):
	 * those may have been cleaned up and disassociated already.
	 */
	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
		kref_put(&peer_device->connection->kref, drbd_destroy_connection);
		kfree(peer_device);
	}
	memset(device, 0xfd, sizeof(*device));
	kfree(device);
	kref_put(&resource->kref, drbd_destroy_resource);
}
/* One global retry thread, if we need to push back some bio and have it
 * reinserted through our make request function.
 */
static struct retry_worker {
	struct workqueue_struct *wq;
	struct work_struct worker;

	spinlock_t lock;
	struct list_head writes;
} retry;
static void do_retry(struct work_struct *ws)
{
	struct retry_worker *retry = container_of(ws, struct retry_worker, worker);
	LIST_HEAD(writes);
	struct drbd_request *req, *tmp;

	spin_lock_irq(&retry->lock);
	list_splice_init(&retry->writes, &writes);
	spin_unlock_irq(&retry->lock);

	list_for_each_entry_safe(req, tmp, &writes, tl_requests) {
		struct drbd_device *device = req->device;
		struct bio *bio = req->master_bio;
		unsigned long start_jif = req->start_jif;
		bool expected;

		expected =
			expect(atomic_read(&req->completion_ref) == 0) &&
			expect(req->rq_state & RQ_POSTPONED) &&
			expect((req->rq_state & RQ_LOCAL_PENDING) == 0 ||
				(req->rq_state & RQ_LOCAL_ABORTED) != 0);

		if (!expected)
			drbd_err(device, "req=%p completion_ref=%d rq_state=%x\n",
				req, atomic_read(&req->completion_ref),
				req->rq_state);

		/* We still need to put one kref associated with the
		 * "completion_ref" going zero in the code path that queued it
		 * here.  The request object may still be referenced by a
		 * frozen local req->private_bio, in case we force-detached.
		 */
		kref_put(&req->kref, drbd_req_destroy);

		/* A single suspended or otherwise blocking device may stall
		 * all others as well.  Fortunately, this code path is to
		 * recover from a situation that "should not happen":
		 * concurrent writes in multi-primary setup.
		 * In a "normal" lifecycle, this workqueue is supposed to be
		 * destroyed without ever doing anything.
		 * If it turns out to be an issue anyways, we can do per
		 * resource (replication group) or per device (minor) retry
		 * workqueues instead.
		 */

		/* We are not just doing generic_make_request(),
		 * as we want to keep the start_time information. */
		inc_ap_bio(device);
		__drbd_make_request(device, bio, start_jif);
	}
}
/* called via drbd_req_put_completion_ref(),
 * holds resource->req_lock */
void drbd_restart_request(struct drbd_request *req)
{
	unsigned long flags;
	spin_lock_irqsave(&retry.lock, flags);
	list_move_tail(&req->tl_requests, &retry.writes);
	spin_unlock_irqrestore(&retry.lock, flags);

	/* Drop the extra reference that would otherwise
	 * have been dropped by complete_master_bio.
	 * do_retry() needs to grab a new one. */
	dec_ap_bio(req->device);

	queue_work(retry.wq, &retry.worker);
}
void drbd_destroy_resource(struct kref *kref)
{
	struct drbd_resource *resource =
		container_of(kref, struct drbd_resource, kref);

	idr_destroy(&resource->devices);
	free_cpumask_var(resource->cpu_mask);
	kfree(resource->name);
	memset(resource, 0xf2, sizeof(*resource));
	kfree(resource);
}
void drbd_free_resource(struct drbd_resource *resource)
{
	struct drbd_connection *connection, *tmp;

	for_each_connection_safe(connection, tmp, resource) {
		list_del(&connection->connections);
		drbd_debugfs_connection_cleanup(connection);
		kref_put(&connection->kref, drbd_destroy_connection);
	}
	drbd_debugfs_resource_cleanup(resource);
	kref_put(&resource->kref, drbd_destroy_resource);
}
static void drbd_cleanup(void)
{
	unsigned int i;
	struct drbd_device *device;
	struct drbd_resource *resource, *tmp;

	/* first remove proc,
	 * drbdsetup uses its presence to detect
	 * whether DRBD is loaded.
	 * If we would get stuck in proc removal,
	 * but have netlink already deregistered,
	 * some drbdsetup commands may wait forever
	 * for an answer.
	 */
	if (drbd_proc)
		remove_proc_entry("drbd", NULL);

	if (retry.wq)
		destroy_workqueue(retry.wq);

	drbd_genl_unregister();

	idr_for_each_entry(&drbd_devices, device, i)
		drbd_delete_device(device);

	/* not _rcu since, no other updater anymore. Genl already unregistered */
	for_each_resource_safe(resource, tmp, &drbd_resources) {
		list_del(&resource->resources);
		drbd_free_resource(resource);
	}

	drbd_debugfs_cleanup();

	drbd_destroy_mempools();
	unregister_blkdev(DRBD_MAJOR, "drbd");

	idr_destroy(&drbd_devices);

	pr_info("module cleanup done.\n");
}
/**
 * drbd_congested() - Callback for the flusher thread
 * @congested_data:	User data
 * @bdi_bits:		Bits the BDI flusher thread is currently interested in
 *
 * Returns 1<<WB_async_congested and/or 1<<WB_sync_congested if we are congested.
 */
static int drbd_congested(void *congested_data, int bdi_bits)
{
	struct drbd_device *device = congested_data;
	struct request_queue *q;
	char reason = '-';
	int r = 0;

	if (!may_inc_ap_bio(device)) {
		/* DRBD has frozen IO */
		r = bdi_bits;
		reason = 'd';
		goto out;
	}

	if (test_bit(CALLBACK_PENDING, &first_peer_device(device)->connection->flags)) {
		r |= (1 << WB_async_congested);
		/* Without good local data, we would need to read from remote,
		 * and that would need the worker thread as well, which is
		 * currently blocked waiting for that usermode helper to
		 * finish.
		 */
		if (!get_ldev_if_state(device, D_UP_TO_DATE))
			r |= (1 << WB_sync_congested);
		else
			put_ldev(device);
		r &= bdi_bits;
		reason = 'c';
		goto out;
	}

	if (get_ldev(device)) {
		q = bdev_get_queue(device->ldev->backing_bdev);
		r = bdi_congested(q->backing_dev_info, bdi_bits);
		put_ldev(device);
		if (r)
			reason = 'b';
	}

	if (bdi_bits & (1 << WB_async_congested) &&
	    test_bit(NET_CONGESTED, &first_peer_device(device)->connection->flags)) {
		r |= (1 << WB_async_congested);
		reason = reason == 'b' ? 'a' : 'n';
	}

out:
	device->congestion_reason = reason;
	return r;
}
static void drbd_init_workqueue(struct drbd_work_queue *wq)
{
	spin_lock_init(&wq->q_lock);
	INIT_LIST_HEAD(&wq->q);
	init_waitqueue_head(&wq->q_wait);
}
struct completion_work {
	struct drbd_work w;
	struct completion done;
};

static int w_complete(struct drbd_work *w, int cancel)
{
	struct completion_work *completion_work =
		container_of(w, struct completion_work, w);

	complete(&completion_work->done);
	return 0;
}

void drbd_flush_workqueue(struct drbd_work_queue *work_queue)
{
	struct completion_work completion_work;

	completion_work.w.cb = w_complete;
	init_completion(&completion_work.done);
	drbd_queue_work(work_queue, &completion_work.w);
	wait_for_completion(&completion_work.done);
}
struct drbd_resource *drbd_find_resource(const char *name)
{
	struct drbd_resource *resource;

	if (!name || !name[0])
		return NULL;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		if (!strcmp(resource->name, name)) {
			kref_get(&resource->kref);
			goto found;
		}
	}
	resource = NULL;
found:
	rcu_read_unlock();
	return resource;
}
struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
					  void *peer_addr, int peer_addr_len)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	rcu_read_lock();
	for_each_resource_rcu(resource, &drbd_resources) {
		for_each_connection_rcu(connection, resource) {
			if (connection->my_addr_len == my_addr_len &&
			    connection->peer_addr_len == peer_addr_len &&
			    !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
			    !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
				kref_get(&connection->kref);
				goto found;
			}
		}
	}
	connection = NULL;
found:
	rcu_read_unlock();
	return connection;
}
static int drbd_alloc_socket(struct drbd_socket *socket)
{
	socket->rbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->rbuf)
		return -ENOMEM;
	socket->sbuf = (void *) __get_free_page(GFP_KERNEL);
	if (!socket->sbuf)
		return -ENOMEM;
	return 0;
}

static void drbd_free_socket(struct drbd_socket *socket)
{
	free_page((unsigned long) socket->sbuf);
	free_page((unsigned long) socket->rbuf);
}
void conn_free_crypto(struct drbd_connection *connection)
{
	drbd_free_sock(connection);

	crypto_free_ahash(connection->csums_tfm);
	crypto_free_ahash(connection->verify_tfm);
	crypto_free_shash(connection->cram_hmac_tfm);
	crypto_free_ahash(connection->integrity_tfm);
	crypto_free_ahash(connection->peer_integrity_tfm);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);

	connection->csums_tfm = NULL;
	connection->verify_tfm = NULL;
	connection->cram_hmac_tfm = NULL;
	connection->integrity_tfm = NULL;
	connection->peer_integrity_tfm = NULL;
	connection->int_dig_in = NULL;
	connection->int_dig_vv = NULL;
}
int set_resource_options(struct drbd_resource *resource, struct res_opts *res_opts)
{
	struct drbd_connection *connection;
	cpumask_var_t new_cpu_mask;
	int err;

	if (!zalloc_cpumask_var(&new_cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	/* silently ignore cpu mask on UP kernel */
	if (nr_cpu_ids > 1 && res_opts->cpu_mask[0] != 0) {
		err = bitmap_parse(res_opts->cpu_mask, DRBD_CPU_MASK_SIZE,
				   cpumask_bits(new_cpu_mask), nr_cpu_ids);
		if (err == -EOVERFLOW) {
			/* So what. mask it out. */
			cpumask_var_t tmp_cpu_mask;
			if (zalloc_cpumask_var(&tmp_cpu_mask, GFP_KERNEL)) {
				cpumask_setall(tmp_cpu_mask);
				cpumask_and(new_cpu_mask, new_cpu_mask, tmp_cpu_mask);
				drbd_warn(resource, "Overflow in bitmap_parse(%.12s%s), truncating to %u bits\n",
					res_opts->cpu_mask,
					strlen(res_opts->cpu_mask) > 12 ? "..." : "",
					nr_cpu_ids);
				free_cpumask_var(tmp_cpu_mask);
				err = 0;
			}
		}
		if (err) {
			drbd_warn(resource, "bitmap_parse() failed with %d\n", err);
			/* retcode = ERR_CPU_MASK_PARSE; */
			goto fail;
		}
	}
	resource->res_opts = *res_opts;
	if (cpumask_empty(new_cpu_mask))
		drbd_calc_cpu_mask(&new_cpu_mask);
	if (!cpumask_equal(resource->cpu_mask, new_cpu_mask)) {
		cpumask_copy(resource->cpu_mask, new_cpu_mask);
		for_each_connection_rcu(connection, resource) {
			connection->receiver.reset_cpu_mask = 1;
			connection->ack_receiver.reset_cpu_mask = 1;
			connection->worker.reset_cpu_mask = 1;
		}
	}
	err = 0;

fail:
	free_cpumask_var(new_cpu_mask);
	return err;
}
struct drbd_resource *drbd_create_resource(const char *name)
{
	struct drbd_resource *resource;

	resource = kzalloc(sizeof(struct drbd_resource), GFP_KERNEL);
	if (!resource)
		goto fail;
	resource->name = kstrdup(name, GFP_KERNEL);
	if (!resource->name)
		goto fail_free_resource;
	if (!zalloc_cpumask_var(&resource->cpu_mask, GFP_KERNEL))
		goto fail_free_name;
	kref_init(&resource->kref);
	idr_init(&resource->devices);
	INIT_LIST_HEAD(&resource->connections);
	resource->write_ordering = WO_BDEV_FLUSH;
	list_add_tail_rcu(&resource->resources, &drbd_resources);
	mutex_init(&resource->conf_update);
	mutex_init(&resource->adm_mutex);
	spin_lock_init(&resource->req_lock);
	drbd_debugfs_resource_add(resource);
	return resource;

fail_free_name:
	kfree(resource->name);
fail_free_resource:
	kfree(resource);
fail:
	return NULL;
}
/* caller must be under adm_mutex */
struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{
	struct drbd_resource *resource;
	struct drbd_connection *connection;

	connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
	if (!connection)
		return NULL;

	if (drbd_alloc_socket(&connection->data))
		goto fail;
	if (drbd_alloc_socket(&connection->meta))
		goto fail;

	connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
	if (!connection->current_epoch)
		goto fail;

	INIT_LIST_HEAD(&connection->transfer_log);

	INIT_LIST_HEAD(&connection->current_epoch->list);
	connection->epochs = 1;
	spin_lock_init(&connection->epoch_lock);

	connection->send.seen_any_write_yet = false;
	connection->send.current_epoch_nr = 0;
	connection->send.current_epoch_writes = 0;

	resource = drbd_create_resource(name);
	if (!resource)
		goto fail;

	connection->cstate = C_STANDALONE;
	mutex_init(&connection->cstate_mutex);
	init_waitqueue_head(&connection->ping_wait);
	idr_init(&connection->peer_devices);

	drbd_init_workqueue(&connection->sender_work);
	mutex_init(&connection->data.mutex);
	mutex_init(&connection->meta.mutex);

	drbd_thread_init(resource, &connection->receiver, drbd_receiver, "receiver");
	connection->receiver.connection = connection;
	drbd_thread_init(resource, &connection->worker, drbd_worker, "worker");
	connection->worker.connection = connection;
	drbd_thread_init(resource, &connection->ack_receiver, drbd_ack_receiver, "ack_recv");
	connection->ack_receiver.connection = connection;

	kref_init(&connection->kref);

	connection->resource = resource;

	if (set_resource_options(resource, res_opts))
		goto fail_resource;

	kref_get(&resource->kref);
	list_add_tail_rcu(&connection->connections, &resource->connections);
	drbd_debugfs_connection_add(connection);
	return connection;

fail_resource:
	list_del(&resource->resources);
	drbd_free_resource(resource);
fail:
	kfree(connection->current_epoch);
	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection);
	return NULL;
}
void drbd_destroy_connection(struct kref *kref)
{
	struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
	struct drbd_resource *resource = connection->resource;

	if (atomic_read(&connection->current_epoch->epoch_size) != 0)
		drbd_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
	kfree(connection->current_epoch);

	idr_destroy(&connection->peer_devices);

	drbd_free_socket(&connection->meta);
	drbd_free_socket(&connection->data);
	kfree(connection->int_dig_in);
	kfree(connection->int_dig_vv);
	memset(connection, 0xfc, sizeof(*connection));
	kfree(connection);
	kref_put(&resource->kref, drbd_destroy_resource);
}
static int init_submitter(struct drbd_device *device)
{
	/* opencoded create_singlethread_workqueue(),
	 * to be able to say "drbd%d", ..., minor */
	device->submit.wq =
		alloc_ordered_workqueue("drbd%u_submit", WQ_MEM_RECLAIM, device->minor);
	if (!device->submit.wq)
		return -ENOMEM;

	INIT_WORK(&device->submit.worker, do_submit);
	INIT_LIST_HEAD(&device->submit.writes);
	return 0;
}
enum drbd_ret_code drbd_create_device(struct drbd_config_context *adm_ctx, unsigned int minor)
{
	struct drbd_resource *resource = adm_ctx->resource;
	struct drbd_connection *connection;
	struct drbd_device *device;
	struct drbd_peer_device *peer_device, *tmp_peer_device;
	struct gendisk *disk;
	struct request_queue *q;
	int id;
	int vnr = adm_ctx->volume;
	enum drbd_ret_code err = ERR_NOMEM;

	device = minor_to_device(minor);
	if (device)
		return ERR_MINOR_OR_VOLUME_EXISTS;

	/* GFP_KERNEL, we are outside of all write-out paths */
	device = kzalloc(sizeof(struct drbd_device), GFP_KERNEL);
	if (!device)
		return ERR_NOMEM;
	kref_init(&device->kref);

	kref_get(&resource->kref);
	device->resource = resource;
	device->minor = minor;
	device->vnr = vnr;

	drbd_init_set_defaults(device);

	q = blk_alloc_queue_node(GFP_KERNEL, NUMA_NO_NODE, &resource->req_lock);
	if (!q)
		goto out_no_q;
	device->rq_queue = q;
	q->queuedata = device;

	disk = alloc_disk(1);
	if (!disk)
		goto out_no_disk;
	device->vdisk = disk;

	set_disk_ro(disk, true);

	disk->queue = q;
	disk->major = DRBD_MAJOR;
	disk->first_minor = minor;
	disk->fops = &drbd_ops;
	sprintf(disk->disk_name, "drbd%d", minor);
	disk->private_data = device;

	device->this_bdev = bdget(MKDEV(DRBD_MAJOR, minor));
	/* we have no partitions. we contain only ourselves. */
	device->this_bdev->bd_contains = device->this_bdev;

	q->backing_dev_info->congested_fn = drbd_congested;
	q->backing_dev_info->congested_data = device;

	blk_queue_make_request(q, drbd_make_request);
	blk_queue_write_cache(q, true, true);
	/* Setting the max_hw_sectors to an odd value of 8kibyte here
	   This triggers a max_bio_size message upon first attach or connect */
	blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);

	device->md_io.page = alloc_page(GFP_KERNEL);
	if (!device->md_io.page)
		goto out_no_io_page;

	if (drbd_bm_init(device))
		goto out_no_bitmap;
	device->read_requests = RB_ROOT;
	device->write_requests = RB_ROOT;

	id = idr_alloc(&drbd_devices, device, minor, minor + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC)
			err = ERR_MINOR_OR_VOLUME_EXISTS;
		goto out_no_minor_idr;
	}
	kref_get(&device->kref);

	id = idr_alloc(&resource->devices, device, vnr, vnr + 1, GFP_KERNEL);
	if (id < 0) {
		if (id == -ENOSPC)
			err = ERR_MINOR_OR_VOLUME_EXISTS;
		goto out_idr_remove_minor;
	}
	kref_get(&device->kref);

	INIT_LIST_HEAD(&device->peer_devices);
	INIT_LIST_HEAD(&device->pending_bitmap_io);
	for_each_connection(connection, resource) {
		peer_device = kzalloc(sizeof(struct drbd_peer_device), GFP_KERNEL);
		if (!peer_device)
			goto out_idr_remove_from_resource;
		peer_device->connection = connection;
		peer_device->device = device;

		list_add(&peer_device->peer_devices, &device->peer_devices);
		kref_get(&device->kref);

		id = idr_alloc(&connection->peer_devices, peer_device, vnr, vnr + 1, GFP_KERNEL);
		if (id < 0) {
			if (id == -ENOSPC)
				err = ERR_INVALID_REQUEST;
			goto out_idr_remove_from_resource;
		}
		kref_get(&connection->kref);
		INIT_WORK(&peer_device->send_acks_work, drbd_send_acks_wf);
	}

	if (init_submitter(device)) {
		err = ERR_NOMEM;
		goto out_idr_remove_vol;
	}

	add_disk(disk);

	/* inherit the connection state */
	device->state.conn = first_connection(resource)->cstate;
	if (device->state.conn == C_WF_REPORT_PARAMS) {
		for_each_peer_device(peer_device, device)
			drbd_connected(peer_device);
	}
	/* move to create_peer_device() */
	for_each_peer_device(peer_device, device)
		drbd_debugfs_peer_device_add(peer_device);
	drbd_debugfs_device_add(device);
	return NO_ERROR;

out_idr_remove_vol:
	idr_remove(&connection->peer_devices, vnr);
out_idr_remove_from_resource:
	for_each_connection(connection, resource) {
		peer_device = idr_remove(&connection->peer_devices, vnr);
		if (peer_device)
			kref_put(&connection->kref, drbd_destroy_connection);
	}
	for_each_peer_device_safe(peer_device, tmp_peer_device, device) {
		list_del(&peer_device->peer_devices);
		kfree(peer_device);
	}
	idr_remove(&resource->devices, vnr);
out_idr_remove_minor:
	idr_remove(&drbd_devices, minor);
	synchronize_rcu();
out_no_minor_idr:
	drbd_bm_cleanup(device);
out_no_bitmap:
	__free_page(device->md_io.page);
out_no_io_page:
	put_disk(disk);
out_no_disk:
	blk_cleanup_queue(q);
out_no_q:
	kref_put(&resource->kref, drbd_destroy_resource);
	kfree(device);
	return err;
}
void drbd_delete_device(struct drbd_device *device)
{
	struct drbd_resource *resource = device->resource;
	struct drbd_connection *connection;
	struct drbd_peer_device *peer_device;

	/* move to free_peer_device() */
	for_each_peer_device(peer_device, device)
		drbd_debugfs_peer_device_cleanup(peer_device);
	drbd_debugfs_device_cleanup(device);
	for_each_connection(connection, resource) {
		idr_remove(&connection->peer_devices, device->vnr);
		kref_put(&device->kref, drbd_destroy_device);
	}
	idr_remove(&resource->devices, device->vnr);
	kref_put(&device->kref, drbd_destroy_device);
	idr_remove(&drbd_devices, device_to_minor(device));
	kref_put(&device->kref, drbd_destroy_device);
	del_gendisk(device->vdisk);
	synchronize_rcu();
	kref_put(&device->kref, drbd_destroy_device);
}
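/*
 * Reference counting, read one plausible way: each kref_put() above drops one
 * reference the device held while it was publicly reachable: one per
 * connection->peer_devices idr entry, one for resource->devices, one for the
 * global drbd_devices idr, and the final put for the reference taken with
 * kref_init() in drbd_create_device().
 */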
static int __init drbd_init(void)
{
	int err;

	if (drbd_minor_count < DRBD_MINOR_COUNT_MIN || drbd_minor_count > DRBD_MINOR_COUNT_MAX) {
		pr_err("invalid minor_count (%d)\n", drbd_minor_count);
#ifdef MODULE
		return -EINVAL;
#else
		drbd_minor_count = DRBD_MINOR_COUNT_DEF;
#endif
	}

	err = register_blkdev(DRBD_MAJOR, "drbd");
	if (err) {
		pr_err("unable to register block device major %d\n",
		       DRBD_MAJOR);
		return err;
	}

	/*
	 * allocate all necessary structs
	 */
	init_waitqueue_head(&drbd_pp_wait);

	drbd_proc = NULL; /* play safe for drbd_cleanup */
	idr_init(&drbd_devices);

	mutex_init(&resources_mutex);
	INIT_LIST_HEAD(&drbd_resources);

	err = drbd_genl_register();
	if (err) {
		pr_err("unable to register generic netlink family\n");
		goto fail;
	}

	err = drbd_create_mempools();
	if (err)
		goto fail;

	err = -ENOMEM;
	drbd_proc = proc_create_single("drbd", S_IFREG | 0444, NULL, drbd_seq_show);
	if (!drbd_proc) {
		pr_err("unable to register proc file\n");
		goto fail;
	}

	retry.wq = create_singlethread_workqueue("drbd-reissue");
	if (!retry.wq) {
		pr_err("unable to create retry workqueue\n");
		goto fail;
	}
	INIT_WORK(&retry.worker, do_retry);
	spin_lock_init(&retry.lock);
	INIT_LIST_HEAD(&retry.writes);

	if (drbd_debugfs_init())
		pr_notice("failed to initialize debugfs -- will not be available\n");

	pr_info("initialized. "
	       "Version: " REL_VERSION " (api:%d/proto:%d-%d)\n",
	       API_VERSION, PRO_VERSION_MIN, PRO_VERSION_MAX);
	pr_info("%s\n", drbd_buildtag());
	pr_info("registered as block device major %d\n", DRBD_MAJOR);
	return 0; /* Success! */

fail:
	drbd_cleanup();
	if (err == -ENOMEM)
		pr_err("ran out of memory\n");
	else
		pr_err("initialization failure\n");
	return err;
}
static void drbd_free_one_sock(struct drbd_socket *ds)
{
	struct socket *s;
	mutex_lock(&ds->mutex);
	s = ds->socket;
	ds->socket = NULL;
	mutex_unlock(&ds->mutex);
	if (s) {
		/* so debugfs does not need to mutex_lock() */
		synchronize_rcu();
		kernel_sock_shutdown(s, SHUT_RDWR);
		sock_release(s);
	}
}

void drbd_free_sock(struct drbd_connection *connection)
{
	if (connection->data.socket)
		drbd_free_one_sock(&connection->data);
	if (connection->meta.socket)
		drbd_free_one_sock(&connection->meta);
}
/* meta data management */

void conn_md_sync(struct drbd_connection *connection)
{
	struct drbd_peer_device *peer_device;
	int vnr;

	rcu_read_lock();
	idr_for_each_entry(&connection->peer_devices, peer_device, vnr) {
		struct drbd_device *device = peer_device->device;

		kref_get(&device->kref);
		rcu_read_unlock();
		drbd_md_sync(device);
		kref_put(&device->kref, drbd_destroy_device);
		rcu_read_lock();
	}
	rcu_read_unlock();
}
/* aligned 4kByte */
struct meta_data_on_disk {
	u64 la_size_sect;      /* last agreed size. */
	u64 uuid[UI_SIZE];     /* UUIDs. */
	u64 device_uuid;
	u64 reserved_u64_1;
	u32 flags;             /* MDF */
	u32 magic;
	u32 md_size_sect;
	u32 al_offset;         /* offset to this block */
	u32 al_nr_extents;     /* important for restoring the AL (userspace) */
	      /* `-- act_log->nr_elements <-- ldev->dc.al_extents */
	u32 bm_offset;         /* offset to the bitmap, from here */
	u32 bm_bytes_per_bit;  /* BM_BLOCK_SIZE */
	u32 la_peer_max_bio_size;   /* last peer max_bio_size */

	/* see al_tr_number_to_on_disk_sector() */
	u32 al_stripes;
	u32 al_stripe_size_4k;

	u8 reserved_u8[4096 - (7*8 + 10*4)];
} __packed;
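/*
 * The padding arithmetic, spelled out: the fixed members are 7 u64 fields
 * (la_size_sect, uuid[UI_SIZE] with UI_SIZE == 4, device_uuid, one reserved
 * u64) plus 10 u32 fields, i.e. 7*8 + 10*4 = 96 bytes, so reserved_u8[] pads
 * the structure to exactly 4096 bytes. drbd_md_sync() below BUILD_BUG_ONs on
 * that size, so the on-disk layout cannot drift by accident.
 */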
void drbd_md_write(struct drbd_device *device, void *b)
{
	struct meta_data_on_disk *buffer = b;
	sector_t sector;
	int i;

	memset(buffer, 0, sizeof(*buffer));

	buffer->la_size_sect = cpu_to_be64(drbd_get_capacity(device->this_bdev));
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		buffer->uuid[i] = cpu_to_be64(device->ldev->md.uuid[i]);
	buffer->flags = cpu_to_be32(device->ldev->md.flags);
	buffer->magic = cpu_to_be32(DRBD_MD_MAGIC_84_UNCLEAN);

	buffer->md_size_sect  = cpu_to_be32(device->ldev->md.md_size_sect);
	buffer->al_offset     = cpu_to_be32(device->ldev->md.al_offset);
	buffer->al_nr_extents = cpu_to_be32(device->act_log->nr_elements);
	buffer->bm_bytes_per_bit = cpu_to_be32(BM_BLOCK_SIZE);
	buffer->device_uuid = cpu_to_be64(device->ldev->md.device_uuid);

	buffer->bm_offset = cpu_to_be32(device->ldev->md.bm_offset);
	buffer->la_peer_max_bio_size = cpu_to_be32(device->peer_max_bio_size);

	buffer->al_stripes = cpu_to_be32(device->ldev->md.al_stripes);
	buffer->al_stripe_size_4k = cpu_to_be32(device->ldev->md.al_stripe_size_4k);

	D_ASSERT(device, drbd_md_ss(device->ldev) == device->ldev->md.md_offset);
	sector = device->ldev->md.md_offset;

	if (drbd_md_sync_page_io(device, device->ldev, sector, REQ_OP_WRITE)) {
		/* this was a try anyways ... */
		drbd_err(device, "meta data update failed!\n");
		drbd_chk_io_error(device, 1, DRBD_META_IO_ERROR);
	}
}
/**
 * drbd_md_sync() - Writes the meta data super block if the MD_DIRTY flag bit is set
 * @device:	DRBD device.
 */
void drbd_md_sync(struct drbd_device *device)
{
	struct meta_data_on_disk *buffer;

	/* Don't accidentally change the DRBD meta data layout. */
	BUILD_BUG_ON(UI_SIZE != 4);
	BUILD_BUG_ON(sizeof(struct meta_data_on_disk) != 4096);

	del_timer(&device->md_sync_timer);
	/* timer may be rearmed by drbd_md_mark_dirty() now. */
	if (!test_and_clear_bit(MD_DIRTY, &device->flags))
		return;

	/* We use here D_FAILED and not D_ATTACHING because we try to write
	 * metadata even if we detach due to a disk failure! */
	if (!get_ldev_if_state(device, D_FAILED))
		return;

	buffer = drbd_md_get_buffer(device, __func__);
	if (!buffer)
		goto out;

	drbd_md_write(device, buffer);

	/* Update device->ldev->md.la_size_sect,
	 * since we updated it on metadata. */
	device->ldev->md.la_size_sect = drbd_get_capacity(device->this_bdev);

	drbd_md_put_buffer(device);
out:
	put_ldev(device);
}
static int check_activity_log_stripe_size(struct drbd_device *device,
		struct meta_data_on_disk *on_disk,
		struct drbd_md *in_core)
{
	u32 al_stripes = be32_to_cpu(on_disk->al_stripes);
	u32 al_stripe_size_4k = be32_to_cpu(on_disk->al_stripe_size_4k);
	u64 al_size_4k;

	/* both not set: default to old fixed size activity log */
	if (al_stripes == 0 && al_stripe_size_4k == 0) {
		al_stripes = 1;
		al_stripe_size_4k = MD_32kB_SECT/8;
	}

	/* some paranoia plausibility checks */

	/* we need both values to be set */
	if (al_stripes == 0 || al_stripe_size_4k == 0)
		goto err;

	al_size_4k = (u64)al_stripes * al_stripe_size_4k;

	/* Upper limit of activity log area, to avoid potential overflow
	 * problems in al_tr_number_to_on_disk_sector(). As right now, more
	 * than 72 * 4k blocks total only increases the amount of history,
	 * limiting this arbitrarily to 16 GB is not a real limitation ;-) */
	if (al_size_4k > (16 * 1024 * 1024/4))
		goto err;

	/* Lower limit: we need at least 8 transaction slots (32kB)
	 * to not break existing setups */
	if (al_size_4k < MD_32kB_SECT/8)
		goto err;

	in_core->al_stripe_size_4k = al_stripe_size_4k;
	in_core->al_stripes = al_stripes;
	in_core->al_size_4k = al_size_4k;

	return 0;
err:
	drbd_err(device, "invalid activity log striping: al_stripes=%u, al_stripe_size_4k=%u\n",
			al_stripes, al_stripe_size_4k);
	return -EINVAL;
}
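/*
 * Worked default, assuming MD_32kB_SECT is 32kB expressed in 512-byte sectors
 * (i.e. 64): the "both zero" case above yields al_stripes = 1 and
 * al_stripe_size_4k = 64/8 = 8, so al_size_4k = 8 four-kByte blocks = 32kB,
 * which is exactly the 8-transaction-slot lower limit enforced in the same
 * function.
 */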
static int check_offsets_and_sizes(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	sector_t capacity = drbd_get_capacity(bdev->md_bdev);
	struct drbd_md *in_core = &bdev->md;
	s32 on_disk_al_sect;
	s32 on_disk_bm_sect;

	/* The on-disk size of the activity log, calculated from offsets, and
	 * the size of the activity log calculated from the stripe settings,
	 * should match.
	 * Though we could relax this a bit: it is ok, if the striped activity log
	 * fits in the available on-disk activity log size.
	 * Right now, that would break how resize is implemented.
	 * TODO: make drbd_determine_dev_size() (and the drbdmeta tool) aware
	 * of possible unused padding space in the on disk layout. */
	if (in_core->al_offset < 0) {
		if (in_core->bm_offset > in_core->al_offset)
			goto err;
		on_disk_al_sect = -in_core->al_offset;
		on_disk_bm_sect = in_core->al_offset - in_core->bm_offset;
	} else {
		if (in_core->al_offset != MD_4kB_SECT)
			goto err;
		if (in_core->bm_offset < in_core->al_offset + in_core->al_size_4k * MD_4kB_SECT)
			goto err;

		on_disk_al_sect = in_core->bm_offset - MD_4kB_SECT;
		on_disk_bm_sect = in_core->md_size_sect - in_core->bm_offset;
	}

	/* old fixed size meta data is exactly that: fixed. */
	if (in_core->meta_dev_idx >= 0) {
		if (in_core->md_size_sect != MD_128MB_SECT
		||  in_core->al_offset != MD_4kB_SECT
		||  in_core->bm_offset != MD_4kB_SECT + MD_32kB_SECT
		||  in_core->al_stripes != 1
		||  in_core->al_stripe_size_4k != MD_32kB_SECT/8)
			goto err;
	}

	if (capacity < in_core->md_size_sect)
		goto err;
	if (capacity - in_core->md_size_sect < drbd_md_first_sector(bdev))
		goto err;

	/* should be aligned, and at least 32k */
	if ((on_disk_al_sect & 7) || (on_disk_al_sect < MD_32kB_SECT))
		goto err;

	/* should fit (for now: exactly) into the available on-disk space;
	 * overflow prevention is in check_activity_log_stripe_size() above. */
	if (on_disk_al_sect != in_core->al_size_4k * MD_4kB_SECT)
		goto err;

	/* again, should be aligned */
	if (in_core->bm_offset & 7)
		goto err;

	/* FIXME check for device grow with flex external meta data? */

	/* can the available bitmap space cover the last agreed device size? */
	if (on_disk_bm_sect < (in_core->la_size_sect+7)/MD_4kB_SECT/8/512)
		goto err;

	return 0;

err:
	drbd_err(device, "meta data offsets don't make sense: idx=%d "
			"al_s=%u, al_sz4k=%u, al_offset=%d, bm_offset=%d, "
			"md_size_sect=%u, la_size=%llu, md_capacity=%llu\n",
			in_core->meta_dev_idx,
			in_core->al_stripes, in_core->al_stripe_size_4k,
			in_core->al_offset, in_core->bm_offset, in_core->md_size_sect,
			(unsigned long long)in_core->la_size_sect,
			(unsigned long long)capacity);

	return -EINVAL;
}
/**
 * drbd_md_read() - Reads in the meta data super block
 * @device:	DRBD device.
 * @bdev:	Device from which the meta data should be read in.
 *
 * Return NO_ERROR on success, and an enum drbd_ret_code in case
 * something goes wrong.
 *
 * Called exactly once during drbd_adm_attach(), while still being D_DISKLESS,
 * even before @bdev is assigned to @device->ldev.
 */
int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
{
	struct meta_data_on_disk *buffer;
	u32 magic, flags;
	int i, rv = NO_ERROR;

	if (device->state.disk != D_DISKLESS)
		return ERR_DISK_CONFIGURED;

	buffer = drbd_md_get_buffer(device, __func__);
	if (!buffer)
		return ERR_NOMEM;

	/* First, figure out where our meta data superblock is located,
	 * and read it. */
	bdev->md.meta_dev_idx = bdev->disk_conf->meta_dev_idx;
	bdev->md.md_offset = drbd_md_ss(bdev);
	/* Even for (flexible or indexed) external meta data,
	 * initially restrict us to the 4k superblock for now.
	 * Affects the paranoia out-of-range access check in drbd_md_sync_page_io(). */
	bdev->md.md_size_sect = 8;

	if (drbd_md_sync_page_io(device, bdev, bdev->md.md_offset,
				 REQ_OP_READ)) {
		/* NOTE: can't do normal error processing here as this is
		   called BEFORE disk is attached */
		drbd_err(device, "Error while reading metadata.\n");
		rv = ERR_IO_MD_DISK;
		goto err;
	}

	magic = be32_to_cpu(buffer->magic);
	flags = be32_to_cpu(buffer->flags);
	if (magic == DRBD_MD_MAGIC_84_UNCLEAN ||
	    (magic == DRBD_MD_MAGIC_08 && !(flags & MDF_AL_CLEAN))) {
			/* btw: that's Activity Log clean, not "all" clean. */
		drbd_err(device, "Found unclean meta data. Did you \"drbdadm apply-al\"?\n");
		rv = ERR_MD_UNCLEAN;
		goto err;
	}

	rv = ERR_MD_INVALID;
	if (magic != DRBD_MD_MAGIC_08) {
		if (magic == DRBD_MD_MAGIC_07)
			drbd_err(device, "Found old (0.7) meta data magic. Did you \"drbdadm create-md\"?\n");
		else
			drbd_err(device, "Meta data magic not found. Did you \"drbdadm create-md\"?\n");
		goto err;
	}

	if (be32_to_cpu(buffer->bm_bytes_per_bit) != BM_BLOCK_SIZE) {
		drbd_err(device, "unexpected bm_bytes_per_bit: %u (expected %u)\n",
		    be32_to_cpu(buffer->bm_bytes_per_bit), BM_BLOCK_SIZE);
		goto err;
	}

	/* convert to in_core endian */
	bdev->md.la_size_sect = be64_to_cpu(buffer->la_size_sect);
	for (i = UI_CURRENT; i < UI_SIZE; i++)
		bdev->md.uuid[i] = be64_to_cpu(buffer->uuid[i]);
	bdev->md.flags = be32_to_cpu(buffer->flags);
	bdev->md.device_uuid = be64_to_cpu(buffer->device_uuid);

	bdev->md.md_size_sect = be32_to_cpu(buffer->md_size_sect);
	bdev->md.al_offset = be32_to_cpu(buffer->al_offset);
	bdev->md.bm_offset = be32_to_cpu(buffer->bm_offset);

	if (check_activity_log_stripe_size(device, buffer, &bdev->md))
		goto err;
	if (check_offsets_and_sizes(device, bdev))
		goto err;

	if (be32_to_cpu(buffer->bm_offset) != bdev->md.bm_offset) {
		drbd_err(device, "unexpected bm_offset: %d (expected %d)\n",
		    be32_to_cpu(buffer->bm_offset), bdev->md.bm_offset);
		goto err;
	}
	if (be32_to_cpu(buffer->md_size_sect) != bdev->md.md_size_sect) {
		drbd_err(device, "unexpected md_size: %u (expected %u)\n",
		    be32_to_cpu(buffer->md_size_sect), bdev->md.md_size_sect);
		goto err;
	}

	rv = NO_ERROR;

	spin_lock_irq(&device->resource->req_lock);
	if (device->state.conn < C_CONNECTED) {
		unsigned int peer;
		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
		device->peer_max_bio_size = peer;
	}
	spin_unlock_irq(&device->resource->req_lock);

 err:
	drbd_md_put_buffer(device);

	return rv;
}
/**
 * drbd_md_mark_dirty() - Mark meta data super block as dirty
 * @device:	DRBD device.
 *
 * Call this function if you change anything that should be written to
 * the meta-data super block. This function sets MD_DIRTY, and starts a
 * timer that ensures that within five seconds you have to call drbd_md_sync().
 */
#ifdef DEBUG
void drbd_md_mark_dirty_(struct drbd_device *device, unsigned int line, const char *func)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags)) {
		mod_timer(&device->md_sync_timer, jiffies + HZ);
		device->last_md_mark_dirty.line = line;
		device->last_md_mark_dirty.func = func;
	}
}
#else
void drbd_md_mark_dirty(struct drbd_device *device)
{
	if (!test_and_set_bit(MD_DIRTY, &device->flags))
		mod_timer(&device->md_sync_timer, jiffies + 5*HZ);
}
#endif
void drbd_uuid_move_history(struct drbd_device *device) __must_hold(local)
{
	int i;

	for (i = UI_HISTORY_START; i < UI_HISTORY_END; i++)
		device->ldev->md.uuid[i+1] = device->ldev->md.uuid[i];
}
void __drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	if (idx == UI_CURRENT) {
		if (device->state.role == R_PRIMARY)
			val |= 1;
		else
			val &= ~((u64)1);

		drbd_set_ed_uuid(device, val);
	}

	device->ldev->md.uuid[idx] = val;
	drbd_md_mark_dirty(device);
}
void _drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
void drbd_uuid_set(struct drbd_device *device, int idx, u64 val) __must_hold(local)
{
	unsigned long flags;
	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (device->ldev->md.uuid[idx]) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[idx];
	}
	__drbd_uuid_set(device, idx, val);
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);
}
/**
 * drbd_uuid_new_current() - Creates a new current UUID
 * @device:	DRBD device.
 *
 * Creates a new current UUID, and rotates the old current UUID into
 * the bitmap slot. Causes an incremental resync upon next connect.
 */
void drbd_uuid_new_current(struct drbd_device *device) __must_hold(local)
{
	u64 val;
	unsigned long long bm_uuid;

	get_random_bytes(&val, sizeof(u64));

	spin_lock_irq(&device->ldev->md.uuid_lock);
	bm_uuid = device->ldev->md.uuid[UI_BITMAP];

	if (bm_uuid)
		drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

	device->ldev->md.uuid[UI_BITMAP] = device->ldev->md.uuid[UI_CURRENT];
	__drbd_uuid_set(device, UI_CURRENT, val);
	spin_unlock_irq(&device->ldev->md.uuid_lock);

	drbd_print_uuids(device, "new current UUID");
	/* get it to stable storage _now_ */
	drbd_md_sync(device);
}
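/*
 * UUID rotation in short (illustrative values): if before the call
 * uuid[UI_CURRENT] = C and uuid[UI_BITMAP] = 0, then afterwards
 * uuid[UI_BITMAP] = C and uuid[UI_CURRENT] = <new random value>. On the next
 * connect the peers can recognize C as the common ancestor, so only the
 * blocks marked in the bitmap since then need to be resynced.
 */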
void drbd_uuid_set_bm(struct drbd_device *device, u64 val) __must_hold(local)
{
	unsigned long flags;
	if (device->ldev->md.uuid[UI_BITMAP] == 0 && val == 0)
		return;

	spin_lock_irqsave(&device->ldev->md.uuid_lock, flags);
	if (val == 0) {
		drbd_uuid_move_history(device);
		device->ldev->md.uuid[UI_HISTORY_START] = device->ldev->md.uuid[UI_BITMAP];
		device->ldev->md.uuid[UI_BITMAP] = 0;
	} else {
		unsigned long long bm_uuid = device->ldev->md.uuid[UI_BITMAP];
		if (bm_uuid)
			drbd_warn(device, "bm UUID was already set: %llX\n", bm_uuid);

		device->ldev->md.uuid[UI_BITMAP] = val & ~((u64)1);
	}
	spin_unlock_irqrestore(&device->ldev->md.uuid_lock, flags);

	drbd_md_mark_dirty(device);
}
/**
 * drbd_bmio_set_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device:	DRBD device.
 *
 * Sets all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_set_n_write(struct drbd_device *device) __must_hold(local)
{
	int rv = -EIO;

	drbd_md_set_flag(device, MDF_FULL_SYNC);
	drbd_md_sync(device);
	drbd_bm_set_all(device);

	rv = drbd_bm_write(device);

	if (!rv) {
		drbd_md_clear_flag(device, MDF_FULL_SYNC);
		drbd_md_sync(device);
	}

	return rv;
}
/**
 * drbd_bmio_clear_n_write() - io_fn for drbd_queue_bitmap_io() or drbd_bitmap_io()
 * @device:	DRBD device.
 *
 * Clears all bits in the bitmap and writes the whole bitmap to stable storage.
 */
int drbd_bmio_clear_n_write(struct drbd_device *device) __must_hold(local)
{
	drbd_resume_al(device);
	drbd_bm_clear_all(device);
	return drbd_bm_write(device);
}
static int w_bitmap_io(struct drbd_work *w, int unused)
{
	struct drbd_device *device =
		container_of(w, struct drbd_device, bm_io_work.w);
	struct bm_io_work *work = &device->bm_io_work;
	int rv = -EIO;

	if (work->flags != BM_LOCKED_CHANGE_ALLOWED) {
		int cnt = atomic_read(&device->ap_bio_cnt);
		if (cnt)
			drbd_err(device, "FIXME: ap_bio_cnt %d, expected 0; queued for '%s'\n",
					cnt, work->why);
	}

	if (get_ldev(device)) {
		drbd_bm_lock(device, work->why, work->flags);
		rv = work->io_fn(device);
		drbd_bm_unlock(device);
		put_ldev(device);
	}

	clear_bit_unlock(BITMAP_IO, &device->flags);
	wake_up(&device->misc_wait);

	if (work->done)
		work->done(device, rv);

	clear_bit(BITMAP_IO_QUEUED, &device->flags);
	work->why = NULL;
	work->flags = 0;

	return 0;
}
/**
 * drbd_queue_bitmap_io() - Queues an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @done:	callback to be called after the bitmap IO was performed
 * @why:	Descriptive text of the reason for doing the IO
 *
 * While IO on the bitmap happens we freeze application IO thus we ensure
 * that drbd_set_out_of_sync() can not be called. This function MAY ONLY be
 * called from worker context. It MUST NOT be used while a previous such
 * work is still pending!
 *
 * Its worker function encloses the call of io_fn() by get_ldev() and
 * put_ldev().
 */
void drbd_queue_bitmap_io(struct drbd_device *device,
			  int (*io_fn)(struct drbd_device *),
			  void (*done)(struct drbd_device *, int),
			  char *why, enum bm_flag flags)
{
	D_ASSERT(device, current == first_peer_device(device)->connection->worker.task);

	D_ASSERT(device, !test_bit(BITMAP_IO_QUEUED, &device->flags));
	D_ASSERT(device, !test_bit(BITMAP_IO, &device->flags));
	D_ASSERT(device, list_empty(&device->bm_io_work.w.list));
	if (device->bm_io_work.why)
		drbd_err(device, "FIXME going to queue '%s' but '%s' still pending?\n",
			why, device->bm_io_work.why);

	device->bm_io_work.io_fn = io_fn;
	device->bm_io_work.done = done;
	device->bm_io_work.why = why;
	device->bm_io_work.flags = flags;

	spin_lock_irq(&device->resource->req_lock);
	set_bit(BITMAP_IO, &device->flags);
	/* don't wait for pending application IO if the caller indicates that
	 * application IO does not conflict anyways. */
	if (flags == BM_LOCKED_CHANGE_ALLOWED || atomic_read(&device->ap_bio_cnt) == 0) {
		if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
			drbd_queue_work(&first_peer_device(device)->connection->sender_work,
					&device->bm_io_work.w);
	}
	spin_unlock_irq(&device->resource->req_lock);
}
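/*
 * Usage sketch (illustrative only, not a call site in this file): from worker
 * context one might queue a full bitmap write-out roughly as
 *
 *	drbd_queue_bitmap_io(device, &drbd_bmio_set_n_write, NULL,
 *			     "set_n_write sketch", BM_LOCKED_CHANGE_ALLOWED);
 *
 * The io_fn then runs later from w_bitmap_io(), bracketed by
 * get_ldev()/put_ldev() and drbd_bm_lock()/drbd_bm_unlock(); a NULL "done"
 * callback is simply skipped there.
 */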
/**
 * drbd_bitmap_io() -  Does an IO operation on the whole bitmap
 * @device:	DRBD device.
 * @io_fn:	IO callback to be called when bitmap IO is possible
 * @why:	Descriptive text of the reason for doing the IO
 *
 * Freezes application IO while the actual IO operation runs. This
 * function MAY NOT be called from worker context.
 */
int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *),
		char *why, enum bm_flag flags)
{
	/* Only suspend io, if some operation is supposed to be locked out */
	const bool do_suspend_io = flags & (BM_DONT_CLEAR|BM_DONT_SET|BM_DONT_TEST);
	int rv;

	D_ASSERT(device, current != first_peer_device(device)->connection->worker.task);

	if (do_suspend_io)
		drbd_suspend_io(device);

	drbd_bm_lock(device, why, flags);
	rv = io_fn(device);
	drbd_bm_unlock(device);

	if (do_suspend_io)
		drbd_resume_io(device);

	return rv;
}
void drbd_md_set_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != flag) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags |= flag;
	}
}

void drbd_md_clear_flag(struct drbd_device *device, int flag) __must_hold(local)
{
	if ((device->ldev->md.flags & flag) != 0) {
		drbd_md_mark_dirty(device);
		device->ldev->md.flags &= ~flag;
	}
}
int drbd_md_test_flag(struct drbd_backing_dev *bdev, int flag)
{
	return (bdev->md.flags & flag) != 0;
}
static void md_sync_timer_fn(struct timer_list *t)
{
	struct drbd_device *device = from_timer(device, t, md_sync_timer);
	drbd_device_post_work(device, MD_SYNC);
}
const char *cmdname(enum drbd_packet cmd)
{
	/* THINK may need to become several global tables
	 * when we want to support more than
	 * one PRO_VERSION */
	static const char *cmdnames[] = {
		[P_DATA]	        = "Data",
		[P_WSAME]	        = "WriteSame",
		[P_TRIM]	        = "Trim",
		[P_DATA_REPLY]	        = "DataReply",
		[P_RS_DATA_REPLY]	= "RSDataReply",
		[P_BARRIER]	        = "Barrier",
		[P_BITMAP]	        = "ReportBitMap",
		[P_BECOME_SYNC_TARGET]  = "BecomeSyncTarget",
		[P_BECOME_SYNC_SOURCE]  = "BecomeSyncSource",
		[P_UNPLUG_REMOTE]	= "UnplugRemote",
		[P_DATA_REQUEST]	= "DataRequest",
		[P_RS_DATA_REQUEST]     = "RSDataRequest",
		[P_SYNC_PARAM]	        = "SyncParam",
		[P_SYNC_PARAM89]	= "SyncParam89",
		[P_PROTOCOL]            = "ReportProtocol",
		[P_UUIDS]	        = "ReportUUIDs",
		[P_SIZES]	        = "ReportSizes",
		[P_STATE]	        = "ReportState",
		[P_SYNC_UUID]           = "ReportSyncUUID",
		[P_AUTH_CHALLENGE]      = "AuthChallenge",
		[P_AUTH_RESPONSE]	= "AuthResponse",
		[P_PING]		= "Ping",
		[P_PING_ACK]	        = "PingAck",
		[P_RECV_ACK]	        = "RecvAck",
		[P_WRITE_ACK]	        = "WriteAck",
		[P_RS_WRITE_ACK]	= "RSWriteAck",
		[P_SUPERSEDED]          = "Superseded",
		[P_NEG_ACK]	        = "NegAck",
		[P_NEG_DREPLY]	        = "NegDReply",
		[P_NEG_RS_DREPLY]	= "NegRSDReply",
		[P_BARRIER_ACK]	        = "BarrierAck",
		[P_STATE_CHG_REQ]       = "StateChgRequest",
		[P_STATE_CHG_REPLY]     = "StateChgReply",
		[P_OV_REQUEST]          = "OVRequest",
		[P_OV_REPLY]            = "OVReply",
		[P_OV_RESULT]           = "OVResult",
		[P_CSUM_RS_REQUEST]     = "CsumRSRequest",
		[P_RS_IS_IN_SYNC]	= "CsumRSIsInSync",
		[P_COMPRESSED_BITMAP]   = "CBitmap",
		[P_DELAY_PROBE]         = "DelayProbe",
		[P_OUT_OF_SYNC]		= "OutOfSync",
		[P_RETRY_WRITE]		= "RetryWrite",
		[P_RS_CANCEL]		= "RSCancel",
		[P_CONN_ST_CHG_REQ]	= "conn_st_chg_req",
		[P_CONN_ST_CHG_REPLY]	= "conn_st_chg_reply",
		[P_RETRY_WRITE]		= "retry_write",
		[P_PROTOCOL_UPDATE]	= "protocol_update",
		[P_RS_THIN_REQ]         = "rs_thin_req",
		[P_RS_DEALLOCATED]      = "rs_deallocated",

		/* enum drbd_packet, but not commands - obsoleted flags:
		 *	P_MAY_IGNORE
		 *	P_MAX_OPT_CMD
		 */
	};

	/* too big for the array: 0xfffX */
	if (cmd == P_INITIAL_META)
		return "InitialMeta";
	if (cmd == P_INITIAL_DATA)
		return "InitialData";
	if (cmd == P_CONNECTION_FEATURES)
		return "ConnectionFeatures";
	if (cmd >= ARRAY_SIZE(cmdnames))
		return "Unknown";
	return cmdnames[cmd];
}
/**
 * drbd_wait_misc  -  wait for a request to make progress
 * @device:	device associated with the request
 * @i:		the struct drbd_interval embedded in struct drbd_request or
 *		struct drbd_peer_request
 */
int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
{
	struct net_conf *nc;
	DEFINE_WAIT(wait);
	long timeout;

	rcu_read_lock();
	nc = rcu_dereference(first_peer_device(device)->connection->net_conf);
	if (!nc) {
		rcu_read_unlock();
		return -ETIMEDOUT;
	}
	timeout = nc->ko_count ? nc->timeout * HZ / 10 * nc->ko_count : MAX_SCHEDULE_TIMEOUT;
	rcu_read_unlock();

	/* Indicate to wake up device->misc_wait on progress.  */
	i->waiting = true;
	prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
	spin_unlock_irq(&device->resource->req_lock);
	timeout = schedule_timeout(timeout);
	finish_wait(&device->misc_wait, &wait);
	spin_lock_irq(&device->resource->req_lock);
	if (!timeout || device->state.conn < C_CONNECTED)
		return -ETIMEDOUT;
	if (signal_pending(current))
		return -ERESTARTSYS;
	return 0;
}
void lock_all_resources(void)
{
	struct drbd_resource *resource;
	int __maybe_unused i = 0;

	mutex_lock(&resources_mutex);
	local_irq_disable();
	for_each_resource(resource, &drbd_resources)
		spin_lock_nested(&resource->req_lock, i++);
}

void unlock_all_resources(void)
{
	struct drbd_resource *resource;

	for_each_resource(resource, &drbd_resources)
		spin_unlock(&resource->req_lock);
	local_irq_enable();
	mutex_unlock(&resources_mutex);
}
#ifdef CONFIG_DRBD_FAULT_INJECTION
/* Fault insertion support including random number generator shamelessly
 * stolen from kernel/rcutorture.c */
struct fault_random_state {
	unsigned long state;
	unsigned long count;
};

#define FAULT_RANDOM_MULT 39916801  /* prime */
#define FAULT_RANDOM_ADD	479001701 /* prime */
#define FAULT_RANDOM_REFRESH 10000

/*
 * Crude but fast random-number generator.  Uses a linear congruential
 * generator, with occasional help from get_random_bytes().
 */
static unsigned long
_drbd_fault_random(struct fault_random_state *rsp)
{
	unsigned long refresh;

	if (!rsp->count--) {
		get_random_bytes(&refresh, sizeof(refresh));
		rsp->state += refresh;
		rsp->count = FAULT_RANDOM_REFRESH;
	}
	rsp->state = rsp->state * FAULT_RANDOM_MULT + FAULT_RANDOM_ADD;
	return swahw32(rsp->state);
}
static char *
_drbd_fault_str(unsigned int type) {
	static char *_faults[] = {
		[DRBD_FAULT_MD_WR] = "Meta-data write",
		[DRBD_FAULT_MD_RD] = "Meta-data read",
		[DRBD_FAULT_RS_WR] = "Resync write",
		[DRBD_FAULT_RS_RD] = "Resync read",
		[DRBD_FAULT_DT_WR] = "Data write",
		[DRBD_FAULT_DT_RD] = "Data read",
		[DRBD_FAULT_DT_RA] = "Data read ahead",
		[DRBD_FAULT_BM_ALLOC] = "BM allocation",
		[DRBD_FAULT_AL_EE] = "EE allocation",
		[DRBD_FAULT_RECEIVE] = "receive data corruption",
	};

	return (type < DRBD_FAULT_MAX) ? _faults[type] : "**Unknown**";
}
unsigned int
_drbd_insert_fault(struct drbd_device *device, unsigned int type)
{
	static struct fault_random_state rrs = {0, 0};

	unsigned int ret = (
		(drbd_fault_devs == 0 ||
			((1 << device_to_minor(device)) & drbd_fault_devs) != 0) &&
		(((_drbd_fault_random(&rrs) % 100) + 1) <= drbd_fault_rate));

	if (ret) {
		drbd_fault_count++;

		if (__ratelimit(&drbd_ratelimit_state))
			drbd_warn(device, "***Simulating %s failure\n",
				_drbd_fault_str(type));
	}

	return ret;
}
#endif
const char *drbd_buildtag(void)
{
	/* DRBD built from external sources has here a reference to the
	   git hash of the source code. */

	static char buildtag[38] = "\0uilt-in";

	if (buildtag[0] == 0) {
#ifdef MODULE
		sprintf(buildtag, "srcversion: %-24s", THIS_MODULE->srcversion);
#else
		buildtag[0] = 'b';
#endif
	}

	return buildtag;
}

module_init(drbd_init)
module_exit(drbd_cleanup)
EXPORT_SYMBOL(drbd_conn_str);
EXPORT_SYMBOL(drbd_role_str);
EXPORT_SYMBOL(drbd_disk_str);
EXPORT_SYMBOL(drbd_set_st_err_str);