/*
   drbd_worker.c

   This file is part of DRBD by Philipp Reisner and Lars Ellenberg.

   Copyright (C) 2001-2008, LINBIT Information Technologies GmbH.
   Copyright (C) 1999-2008, Philipp Reisner <philipp.reisner@linbit.com>.
   Copyright (C) 2002-2008, Lars Ellenberg <lars.ellenberg@linbit.com>.

   drbd is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2, or (at your option)
   any later version.

   drbd is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with drbd; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

 */
#include <linux/module.h>
#include <linux/drbd.h>
#include <linux/sched.h>
#include <linux/smp_lock.h>
#include <linux/wait.h>
#include <linux/memcontrol.h>
#include <linux/mm_inline.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

#include "drbd_int.h"
#include "drbd_req.h"
static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel);
/* more endio handlers:
 *   atodb_endio in drbd_actlog.c
 *   drbd_bm_async_io_complete in drbd_bitmap.c
 *
 * For all these callbacks, note the following:
 * The callbacks will be called in irq context by the IDE drivers,
 * and in Softirqs/Tasklets/BH context by the SCSI drivers.
 * Try to get the locking right :)
 */
/* About the global_state_lock
   Each state transition on a device holds a read lock. In case we have
   to evaluate the sync-after dependencies, we grab a write lock, because
   we need stable states on all devices for that.  */
rwlock_t global_state_lock;
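
/* Sketch of the intended locking pattern (illustration only, relying on the
 * helpers further down in this file): a state change on a single device
 * takes the lock shared, while walking the sync-after dependency graph
 * takes it exclusively, e.g.
 *
 *	read_lock(&global_state_lock);
 *	...transition the state of one mdev...
 *	read_unlock(&global_state_lock);
 *
 *	write_lock_irq(&global_state_lock);
 *	...iterate over all devices, as _drbd_pause_after() and
 *	   _drbd_resume_next() below do...
 *	write_unlock_irq(&global_state_lock);
 */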
/* used for synchronous meta data and bitmap IO
 * submitted by drbd_md_sync_page_io()
 */
void drbd_md_io_complete(struct bio *bio, int error)
{
	struct drbd_md_io *md_io;

	md_io = (struct drbd_md_io *)bio->bi_private;
	md_io->error = error;

	complete(&md_io->event);
}
/* reads on behalf of the partner,
 * "submitted" by the receiver
 */
void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = e->mdev;

	D_ASSERT(e->block_id != ID_VACANT);

	spin_lock_irqsave(&mdev->req_lock, flags);
	mdev->read_cnt += e->size >> 9;
	list_del(&e->w.list);
	if (list_empty(&mdev->read_ee))
		wake_up(&mdev->ee_wait);
	if (test_bit(__EE_WAS_ERROR, &e->flags))
		__drbd_chk_io_error(mdev, FALSE);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	drbd_queue_work(&mdev->data.work, &e->w);
	put_ldev(mdev);
}
/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver, final stage.  */
static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
{
	unsigned long flags = 0;
	struct drbd_conf *mdev = e->mdev;
	sector_t e_sector;
	int do_wake;
	int is_syncer_req;
	int do_al_complete_io;

	D_ASSERT(e->block_id != ID_VACANT);

	/* after we moved e to done_ee,
	 * we may no longer access it,
	 * it may be freed/reused already!
	 * (as soon as we release the req_lock) */
	e_sector = e->sector;
	do_al_complete_io = e->flags & EE_CALL_AL_COMPLETE_IO;
	is_syncer_req = is_syncer_block_id(e->block_id);

	spin_lock_irqsave(&mdev->req_lock, flags);
	mdev->writ_cnt += e->size >> 9;
	list_del(&e->w.list); /* has been on active_ee or sync_ee */
	list_add_tail(&e->w.list, &mdev->done_ee);

	/* No hlist_del_init(&e->colision) here, we did not send the Ack yet,
	 * neither did we wake possibly waiting conflicting requests.
	 * done from "drbd_process_done_ee" within the appropriate w.cb
	 * (e_end_block/e_end_resync_block) or from _drbd_clear_done_ee */

	do_wake = is_syncer_req
		? list_empty(&mdev->sync_ee)
		: list_empty(&mdev->active_ee);

	if (test_bit(__EE_WAS_ERROR, &e->flags))
		__drbd_chk_io_error(mdev, FALSE);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (is_syncer_req)
		drbd_rs_complete_io(mdev, e_sector);

	if (do_wake)
		wake_up(&mdev->ee_wait);

	if (do_al_complete_io)
		drbd_al_complete_io(mdev, e_sector);

	wake_asender(mdev);
	put_ldev(mdev);
}
/* writes on behalf of the partner, or resync writes,
 * "submitted" by the receiver.
 */
void drbd_endio_sec(struct bio *bio, int error)
{
	struct drbd_epoch_entry *e = bio->bi_private;
	struct drbd_conf *mdev = e->mdev;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);
	int is_write = bio_data_dir(bio) == WRITE;

	if (error)
		dev_warn(DEV, "%s: error=%d s=%llus\n",
				is_write ? "write" : "read", error,
				(unsigned long long)e->sector);
	if (!error && !uptodate) {
		dev_warn(DEV, "%s: setting error to -EIO s=%llus\n",
				is_write ? "write" : "read",
				(unsigned long long)e->sector);
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	if (error)
		set_bit(__EE_WAS_ERROR, &e->flags);

	bio_put(bio); /* no need for the bio anymore */
	if (atomic_dec_and_test(&e->pending_bios)) {
		if (is_write)
			drbd_endio_write_sec_final(e);
		else
			drbd_endio_read_sec_final(e);
	}
}
/* read, readA or write requests on R_PRIMARY coming from drbd_make_request
 */
void drbd_endio_pri(struct bio *bio, int error)
{
	unsigned long flags;
	struct drbd_request *req = bio->bi_private;
	struct drbd_conf *mdev = req->mdev;
	struct bio_and_error m;
	enum drbd_req_event what;
	int uptodate = bio_flagged(bio, BIO_UPTODATE);

	if (!error && !uptodate) {
		dev_warn(DEV, "p %s: setting error to -EIO\n",
			 bio_data_dir(bio) == WRITE ? "write" : "read");
		/* strange behavior of some lower level drivers...
		 * fail the request by clearing the uptodate flag,
		 * but do not return any error?! */
		error = -EIO;
	}

	/* to avoid recursion in __req_mod */
	if (unlikely(error)) {
		what = (bio_data_dir(bio) == WRITE)
			? write_completed_with_error
			: (bio_rw(bio) == READ)
			  ? read_completed_with_error
			  : read_ahead_completed_with_error;
	} else
		what = completed_ok;

	bio_put(req->private_bio);
	req->private_bio = ERR_PTR(error);

	/* not req_mod(), we need irqsave here! */
	spin_lock_irqsave(&mdev->req_lock, flags);
	__req_mod(req, what, &m);
	spin_unlock_irqrestore(&mdev->req_lock, flags);

	if (m.bio)
		complete_master_bio(mdev, &m);
}
int w_read_retry_remote(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);

	/* We should not detach for read io-error,
	 * but try to WRITE the P_DATA_REPLY to the failed location,
	 * to give the disk the chance to relocate that block */

	spin_lock_irq(&mdev->req_lock);
	if (cancel || mdev->state.pdsk != D_UP_TO_DATE) {
		_req_mod(req, read_retry_remote_canceled);
		spin_unlock_irq(&mdev->req_lock);
		return 1;
	}
	spin_unlock_irq(&mdev->req_lock);

	return w_send_read_req(mdev, w, 0);
}
int w_resync_inactive(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	ERR_IF(cancel) return 1;
	dev_err(DEV, "resync inactive, but callback triggered??\n");
	return 1; /* Simply ignore this! */
}
void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct page *page = e->pages;
	struct page *tmp;
	unsigned len;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	while ((tmp = page_chain_next(page))) {
		/* all but the last page will be fully used */
		sg_set_page(&sg, page, PAGE_SIZE, 0);
		crypto_hash_update(&desc, &sg, sg.length);
		page = tmp;
	}
	/* and now the last, possibly only partially used page */
	len = e->size & (PAGE_SIZE - 1);
	sg_set_page(&sg, page, len ?: PAGE_SIZE, 0);
	crypto_hash_update(&desc, &sg, sg.length);
	crypto_hash_final(&desc, digest);
}
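
/* Illustration of the last-page length math above, with example numbers
 * (assuming PAGE_SIZE == 4096): for e->size == 9216, i.e. two full pages
 * plus 1024 bytes, e->size & (PAGE_SIZE - 1) yields 1024, so the final
 * page is hashed with length 1024.  For an exact multiple of PAGE_SIZE
 * the expression yields 0 and "len ?: PAGE_SIZE" hashes a full page. */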
void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *bio, void *digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	struct bio_vec *bvec;
	int i;

	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_table(&sg, 1);
	crypto_hash_init(&desc);

	__bio_for_each_segment(bvec, bio, i, 0) {
		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
		crypto_hash_update(&desc, &sg, sg.length);
	}
	crypto_hash_final(&desc, digest);
}
static int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	int digest_size;
	void *digest;
	int ok = 1;

	D_ASSERT(e->block_id == DRBD_MAGIC + 0xbeef);

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		return 1;
	}

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		digest_size = crypto_hash_digestsize(mdev->csums_tfm);
		digest = kmalloc(digest_size, GFP_NOIO);
		if (digest) {
			drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);

			inc_rs_pending(mdev);
			ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
						     digest, digest_size,
						     P_CSUM_RS_REQUEST);
			kfree(digest);
		} else {
			dev_err(DEV, "kmalloc() of digest failed.\n");
			ok = 0;
		}
	} else
		ok = 0;

	drbd_free_ee(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_drequest(..., csum) failed\n");
	return ok;
}
#define GFP_TRY	(__GFP_HIGHMEM | __GFP_NOWARN)
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
	struct drbd_epoch_entry *e;

	if (!get_ldev(mdev))
		return -EIO;

	if (drbd_rs_should_slow_down(mdev))
		goto defer;

	/* GFP_TRY, because if there is no memory available right now, this may
	 * be rescheduled for later. It is "only" background resync, after all. */
	e = drbd_alloc_ee(mdev, DRBD_MAGIC+0xbeef, sector, size, GFP_TRY);
	if (!e)
		goto defer;

	e->w.cb = w_e_send_csum;
	spin_lock_irq(&mdev->req_lock);
	list_add(&e->w.list, &mdev->read_ee);
	spin_unlock_irq(&mdev->req_lock);

	atomic_add(size >> 9, &mdev->rs_sect_ev);
	if (drbd_submit_ee(mdev, e, READ, DRBD_FAULT_RS_RD) == 0)
		return 0;

	/* drbd_submit_ee currently fails for one reason only:
	 * not being able to allocate enough bios.
	 * Is dropping the connection going to help? */
	spin_lock_irq(&mdev->req_lock);
	list_del(&e->w.list);
	spin_unlock_irq(&mdev->req_lock);

	drbd_free_ee(mdev, e);
defer:
	put_ldev(mdev);
	return -EAGAIN;
}
void resync_timer_fn(unsigned long data)
{
	struct drbd_conf *mdev = (struct drbd_conf *) data;
	int queue;

	queue = 1;
	switch (mdev->state.conn) {
	case C_VERIFY_S:
		mdev->resync_work.cb = w_make_ov_request;
		break;
	case C_SYNC_TARGET:
		mdev->resync_work.cb = w_make_resync_request;
		break;
	default:
		queue = 0;
		mdev->resync_work.cb = w_resync_inactive;
	}

	/* harmless race: list_empty outside data.work.q_lock */
	if (list_empty(&mdev->resync_work.list) && queue)
		drbd_queue_work(&mdev->data.work, &mdev->resync_work);
}
static void fifo_set(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] = value;
}

static int fifo_push(struct fifo_buffer *fb, int value)
{
	int ov;

	ov = fb->values[fb->head_index];
	fb->values[fb->head_index++] = value;

	if (fb->head_index >= fb->size) /* wrap around */
		fb->head_index = 0;

	return ov;
}

static void fifo_add_val(struct fifo_buffer *fb, int value)
{
	int i;

	for (i = 0; i < fb->size; i++)
		fb->values[i] += value;
}
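
/* How these three helpers implement the resync "plan ahead" ring buffer,
 * with illustrative numbers only: for fb->size == 3 starting all-zero,
 * fifo_add_val(fb, 4) spreads a correction of 4 sectors over every slot
 * ({4, 4, 4}), and each controller invocation then calls fifo_push(fb, 0),
 * which returns the oldest planned value (4) as the correction to apply
 * now and leaves {0, 4, 4} behind for later steps. */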
int drbd_rs_controller(struct drbd_conf *mdev)
{
	unsigned int sect_in;  /* Number of sectors that came in since the last turn */
	unsigned int want;     /* The number of sectors we want in the proxy */
	int req_sect; /* Number of sectors to request in this turn */
	int correction; /* Number of sectors more we need in the proxy */
	int cps; /* correction per invocation of drbd_rs_controller() */
	int steps; /* Number of time steps to plan ahead */
	int curr_corr;
	int max_sect;

	sect_in = atomic_xchg(&mdev->rs_sect_in, 0); /* Number of sectors that came in */
	mdev->rs_in_flight -= sect_in;

	spin_lock(&mdev->peer_seq_lock); /* get an atomic view on mdev->rs_plan_s */

	steps = mdev->rs_plan_s.size; /* (mdev->sync_conf.c_plan_ahead * 10 * SLEEP_TIME) / HZ; */

	if (mdev->rs_in_flight + sect_in == 0) { /* At start of resync */
		want = ((mdev->sync_conf.rate * 2 * SLEEP_TIME) / HZ) * steps;
	} else { /* normal path */
		want = mdev->sync_conf.c_fill_target ? mdev->sync_conf.c_fill_target :
			sect_in * mdev->sync_conf.c_delay_target * HZ / (SLEEP_TIME * 10);
	}

	correction = want - mdev->rs_in_flight - mdev->rs_planed;

	/* Plan ahead */
	cps = correction / steps;
	fifo_add_val(&mdev->rs_plan_s, cps);
	mdev->rs_planed += cps * steps;

	/* What we do in this step */
	curr_corr = fifo_push(&mdev->rs_plan_s, 0);
	spin_unlock(&mdev->peer_seq_lock);
	mdev->rs_planed -= curr_corr;

	req_sect = sect_in + curr_corr;

	max_sect = (mdev->sync_conf.c_max_rate * 2 * SLEEP_TIME) / HZ;
	if (req_sect > max_sect)
		req_sect = max_sect;

	dev_warn(DEV, "si=%u if=%d wa=%u co=%d st=%d cps=%d pl=%d cc=%d rs=%d\n",
		 sect_in, mdev->rs_in_flight, want, correction,
		 steps, cps, mdev->rs_planed, curr_corr, req_sect);

	return req_sect;
}
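
/* Worked example with illustrative numbers only: assume SLEEP_TIME is
 * 100ms worth of jiffies (HZ/10), steps == 20, c_fill_target == 0,
 * c_delay_target == 10 and sect_in == 400 sectors arrived last turn.
 * Then
 *	want       = 400 * 10 * HZ / (SLEEP_TIME * 10) = 4000 sectors
 *	correction = want - rs_in_flight - rs_planed
 *	cps        = correction / 20	(spread over the plan-ahead window)
 * and the amount actually requested this turn is
 *	req_sect   = sect_in + curr_corr,
 * clamped to (c_max_rate * 2 * SLEEP_TIME) / HZ. */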
int w_make_resync_request(struct drbd_conf *mdev,
		struct drbd_work *w, int cancel)
{
	unsigned long bit;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
	int max_segment_size;
	int number, rollback_i, size, pe, mx;
	int align, queued, sndbuf;
	int i = 0;

	if (unlikely(cancel))
		return 1;

	if (unlikely(mdev->state.conn < C_CONNECTED)) {
		dev_err(DEV, "Confused in w_make_resync_request()! cstate < Connected");
		return 0;
	}

	if (mdev->state.conn != C_SYNC_TARGET)
		dev_err(DEV, "%s in w_make_resync_request\n",
			drbd_conn_str(mdev->state.conn));

	if (mdev->rs_total == 0) {
		drbd_resync_finished(mdev);
		return 1;
	}

	if (!get_ldev(mdev)) {
		/* Since we only need to access mdev->rsync a
		   get_ldev_if_state(mdev,D_FAILED) would be sufficient, but
		   to continue resync with a broken disk makes no sense at
		   all */
		dev_err(DEV, "Disk broke down during resync!\n");
		mdev->resync_work.cb = w_resync_inactive;
		return 1;
	}

	/* starting with drbd 8.3.8, we can handle multi-bio EEs,
	 * if it should be necessary */
	max_segment_size =
		mdev->agreed_pro_version < 94 ? queue_max_segment_size(mdev->rq_queue) :
		mdev->agreed_pro_version < 95 ? DRBD_MAX_SIZE_H80_PACKET : DRBD_MAX_SEGMENT_SIZE;

	if (mdev->rs_plan_s.size) { /* mdev->sync_conf.c_plan_ahead */
		number = drbd_rs_controller(mdev) >> (BM_BLOCK_SHIFT - 9);
		mdev->c_sync_rate = number * HZ * (BM_BLOCK_SIZE / 1024) / SLEEP_TIME;
	} else {
		mdev->c_sync_rate = mdev->sync_conf.rate;
		number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
	}
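
	/* Example with illustrative numbers: a configured sync rate of
	 * 10240 KiB/s, a 4 KiB BM_BLOCK_SIZE and SLEEP_TIME of HZ/10 give
	 *	number = (HZ/10) * 10240 / (4 * HZ) = 256
	 * resync requests (one bitmap block each) per 100ms timer tick. */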
	/* Throttle resync on lower level disk activity, which may also be
	 * caused by application IO on Primary/SyncTarget.
	 * Keep this after the call to drbd_rs_controller, as that assumes
	 * to be called as precisely as possible every SLEEP_TIME,
	 * and would be confused otherwise. */
	if (drbd_rs_should_slow_down(mdev))
		goto requeue;

	mutex_lock(&mdev->data.mutex);
	if (mdev->data.socket)
		mx = mdev->data.socket->sk->sk_rcvbuf / sizeof(struct p_block_req);
	else
		mx = 1;
	mutex_unlock(&mdev->data.mutex);

	/* For resync rates >160MB/sec, allow more pending RS requests */
	if (number > mx)
		mx = number;

	/* Limit the number of pending RS requests to no more than the peer's receive buffer */
	pe = atomic_read(&mdev->rs_pending_cnt);
	if ((pe + number) > mx)
		number = mx - pe;

	for (i = 0; i < number; i++) {
		/* Stop generating RS requests, when half of the send buffer is filled */
		mutex_lock(&mdev->data.mutex);
		if (mdev->data.socket) {
			queued = mdev->data.socket->sk->sk_wmem_queued;
			sndbuf = mdev->data.socket->sk->sk_sndbuf;
		} else {
			queued = 1;
			sndbuf = 0;
		}
		mutex_unlock(&mdev->data.mutex);
		if (queued > sndbuf / 2)
			goto requeue;

next_sector:
		size = BM_BLOCK_SIZE;
		bit  = drbd_bm_find_next(mdev, mdev->bm_resync_fo);

		if (bit == -1UL) {
			mdev->bm_resync_fo = drbd_bm_bits(mdev);
			mdev->resync_work.cb = w_resync_inactive;
			put_ldev(mdev);
			return 1;
		}

		sector = BM_BIT_TO_SECT(bit);

		if (drbd_try_rs_begin_io(mdev, sector)) {
			mdev->bm_resync_fo = bit;
			goto requeue;
		}
		mdev->bm_resync_fo = bit + 1;

		if (unlikely(drbd_bm_test_bit(mdev, bit) == 0)) {
			drbd_rs_complete_io(mdev, sector);
			goto next_sector;
		}

#if DRBD_MAX_SEGMENT_SIZE > BM_BLOCK_SIZE
		/* try to find some adjacent bits.
		 * we stop if we have already the maximum req size.
		 *
		 * Additionally always align bigger requests, in order to
		 * be prepared for all stripe sizes of software RAIDs.
		 */
		align = 1;
		rollback_i = i;
		for (;;) {
			if (size + BM_BLOCK_SIZE > max_segment_size)
				break;

			/* Be always aligned */
			if (sector & ((1<<(align+3))-1))
				break;

			/* do not cross extent boundaries */
			if (((bit+1) & BM_BLOCKS_PER_BM_EXT_MASK) == 0)
				break;
			/* now, is it actually dirty, after all?
			 * caution, drbd_bm_test_bit is tri-state for some
			 * obscure reason; ( b == 0 ) would get the out-of-band
			 * only accidentally right because of the "oddly sized"
			 * adjustment below */
			if (drbd_bm_test_bit(mdev, bit+1) != 1)
				break;
			bit++;
			size += BM_BLOCK_SIZE;
			if ((BM_BLOCK_SIZE << align) <= size)
				align++;
			i++;
		}
		/* if we merged some,
		 * reset the offset to start the next drbd_bm_find_next from */
		if (size > BM_BLOCK_SIZE)
			mdev->bm_resync_fo = bit + 1;
#endif

		/* adjust very last sectors, in case we are oddly sized */
		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;
		if (mdev->agreed_pro_version >= 89 && mdev->csums_tfm) {
			switch (read_for_csum(mdev, sector, size)) {
			case -EIO: /* Disk failure */
				put_ldev(mdev);
				return 0;
			case -EAGAIN: /* allocation failed, or ldev busy */
				drbd_rs_complete_io(mdev, sector);
				mdev->bm_resync_fo = BM_SECT_TO_BIT(sector);
				i = rollback_i;
				goto requeue;
			case 0:
				/* everything ok */
				break;
			default:
				BUG();
			}
		} else {
			inc_rs_pending(mdev);
			if (!drbd_send_drequest(mdev, P_RS_DATA_REQUEST,
					       sector, size, ID_SYNCER)) {
				dev_err(DEV, "drbd_send_drequest() failed, aborting...\n");
				dec_rs_pending(mdev);
				put_ldev(mdev);
				return 0;
			}
		}
	}

	if (mdev->bm_resync_fo >= drbd_bm_bits(mdev)) {
		/* last syncer _request_ was sent,
		 * but the P_RS_DATA_REPLY not yet received.  sync will end (and
		 * next sync group will resume), as soon as we receive the last
		 * resync data block, and the last bit is cleared.
		 * until then resync "work" is "inactive" ...
		 */
		mdev->resync_work.cb = w_resync_inactive;
		put_ldev(mdev);
		return 1;
	}

 requeue:
	mdev->rs_in_flight += (i << (BM_BLOCK_SHIFT - 9));
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	put_ldev(mdev);
	return 1;
}
static int w_make_ov_request(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	int number, i, size;
	sector_t sector;
	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);

	if (unlikely(cancel))
		return 1;

	if (unlikely(mdev->state.conn < C_CONNECTED)) {
		dev_err(DEV, "Confused in w_make_ov_request()! cstate < Connected");
		return 0;
	}

	number = SLEEP_TIME*mdev->sync_conf.rate / ((BM_BLOCK_SIZE/1024)*HZ);
	if (atomic_read(&mdev->rs_pending_cnt) > number)
		goto requeue;

	number -= atomic_read(&mdev->rs_pending_cnt);

	sector = mdev->ov_position;
	for (i = 0; i < number; i++) {
		if (sector >= capacity) {
			mdev->resync_work.cb = w_resync_inactive;
			return 1;
		}

		size = BM_BLOCK_SIZE;

		if (drbd_try_rs_begin_io(mdev, sector)) {
			mdev->ov_position = sector;
			goto requeue;
		}

		if (sector + (size>>9) > capacity)
			size = (capacity-sector)<<9;

		inc_rs_pending(mdev);
		if (!drbd_send_ov_request(mdev, sector, size)) {
			dec_rs_pending(mdev);
			return 0;
		}
		sector += BM_SECT_PER_BIT;
	}
	mdev->ov_position = sector;

 requeue:
	mod_timer(&mdev->resync_timer, jiffies + SLEEP_TIME);
	return 1;
}
int w_ov_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	kfree(w);
	drbd_resync_finished(mdev);

	return 1;
}

static int w_resync_finished(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	kfree(w);
	drbd_resync_finished(mdev);

	return 1;
}
static void ping_peer(struct drbd_conf *mdev)
{
	clear_bit(GOT_PING_ACK, &mdev->flags);
	request_ping(mdev);
	wait_event(mdev->misc_wait,
		   test_bit(GOT_PING_ACK, &mdev->flags) || mdev->state.conn < C_CONNECTED);
}
int drbd_resync_finished(struct drbd_conf *mdev)
{
	unsigned long db, dt, dbdt;
	unsigned long n_oos;
	union drbd_state os, ns;
	struct drbd_work *w;
	char *khelper_cmd = NULL;

	/* Remove all elements from the resync LRU. Since future actions
	 * might set bits in the (main) bitmap, then the entries in the
	 * resync LRU would be wrong. */
	if (drbd_rs_del_all(mdev)) {
		/* In case this is not possible now, most probably because
		 * there are P_RS_DATA_REPLY Packets lingering on the worker's
		 * queue (or even the read operations for those packets
		 * is not finished by now). Retry in 100ms. */

		__set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(HZ / 10);
		w = kmalloc(sizeof(struct drbd_work), GFP_ATOMIC);
		if (w) {
			w->cb = w_resync_finished;
			drbd_queue_work(&mdev->data.work, w);
			return 1;
		}
		dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
	}

	dt = (jiffies - mdev->rs_start - mdev->rs_paused) / HZ;
	if (dt <= 0)
		dt = 1;
	db = mdev->rs_total;
	dbdt = Bit2KB(db/dt);
	mdev->rs_paused /= HZ;
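
	/* Illustration of the throughput figure with example numbers,
	 * assuming a 4 KiB BM_BLOCK_SIZE: a resync covering
	 * db = 262144 bitmap bits (1 GiB) that took dt = 50 seconds of
	 * un-paused time gives dbdt = Bit2KB(262144 / 50) = 20968 K/sec,
	 * i.e. roughly 20 MiB/s reported in the log line below. */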
	if (!get_ldev(mdev))
		goto out;

	ping_peer(mdev);

	spin_lock_irq(&mdev->req_lock);
	os = mdev->state;

	/* This protects us against multiple calls (that can happen in the presence
	   of application IO), and against connectivity loss just before we arrive here. */
	if (os.conn <= C_CONNECTED)
		goto out_unlock;

	ns = os;
	ns.conn = C_CONNECTED;

	dev_info(DEV, "%s done (total %lu sec; paused %lu sec; %lu K/sec)\n",
	     (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) ?
	     "Online verify " : "Resync",
	     dt + mdev->rs_paused, mdev->rs_paused, dbdt);

	n_oos = drbd_bm_total_weight(mdev);

	if (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T) {
		if (n_oos) {
			dev_alert(DEV, "Online verify found %lu %dk block out of sync!\n",
			      n_oos, Bit2KB(1));
			khelper_cmd = "out-of-sync";
		}
	} else {
		D_ASSERT((n_oos - mdev->rs_failed) == 0);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
			khelper_cmd = "after-resync-target";

		if (mdev->csums_tfm && mdev->rs_total) {
			const unsigned long s = mdev->rs_same_csum;
			const unsigned long t = mdev->rs_total;
			const int ratio =
				(t < 100000) ? ((s*100)/t) : (s/(t/100));
			dev_info(DEV, "%u %% had equal check sums, eliminated: %luK; "
			     "transferred %luK total %luK\n",
			     ratio,
			     Bit2KB(mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total - mdev->rs_same_csum),
			     Bit2KB(mdev->rs_total));
		}
	}
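
	/* The two-sided ratio expression above trades off integer overflow
	 * against precision; with illustrative numbers: for t = 400 total
	 * bits and s = 30 equal checksums it computes (30*100)/400 = 7%,
	 * while for t = 1000000 and s = 300000 it computes
	 * 300000/(1000000/100) = 30%. */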
	if (mdev->rs_failed) {
		dev_info(DEV, " %lu failed blocks\n", mdev->rs_failed);

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			ns.disk = D_INCONSISTENT;
			ns.pdsk = D_UP_TO_DATE;
		} else {
			ns.disk = D_UP_TO_DATE;
			ns.pdsk = D_INCONSISTENT;
		}
	} else {
		ns.disk = D_UP_TO_DATE;
		ns.pdsk = D_UP_TO_DATE;

		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) {
			if (mdev->p_uuid) {
				int i;
				for (i = UI_BITMAP ; i <= UI_HISTORY_END ; i++)
					_drbd_uuid_set(mdev, i, mdev->p_uuid[i]);
				drbd_uuid_set(mdev, UI_BITMAP, mdev->ldev->md.uuid[UI_CURRENT]);
				_drbd_uuid_set(mdev, UI_CURRENT, mdev->p_uuid[UI_CURRENT]);
			} else {
				dev_err(DEV, "mdev->p_uuid is NULL! BUG\n");
			}
		}

		drbd_uuid_set_bm(mdev, 0UL);

		if (mdev->p_uuid) {
			/* Now the two UUID sets are equal, update what we
			 * know of the peer. */
			int i;
			for (i = UI_CURRENT ; i <= UI_HISTORY_END ; i++)
				mdev->p_uuid[i] = mdev->ldev->md.uuid[i];
		}
	}

	_drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
out_unlock:
	spin_unlock_irq(&mdev->req_lock);
	put_ldev(mdev);
out:
	mdev->rs_total  = 0;
	mdev->rs_failed = 0;
	mdev->rs_paused = 0;
	mdev->ov_start_sector = 0;

	if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
		dev_info(DEV, "Writing the whole bitmap\n");
		drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
	}

	if (khelper_cmd)
		drbd_khelper(mdev, khelper_cmd);

	return 1;
}
static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
{
	if (drbd_ee_has_active_page(e)) {
		/* This might happen if sendpage() has not finished */
		int i = (e->size + PAGE_SIZE -1) >> PAGE_SHIFT;
		atomic_add(i, &mdev->pp_in_use_by_net);
		atomic_sub(i, &mdev->pp_in_use);
		spin_lock_irq(&mdev->req_lock);
		list_add_tail(&e->w.list, &mdev->net_ee);
		spin_unlock_irq(&mdev->req_lock);
		wake_up(&drbd_pp_wait);
	} else
		drbd_free_ee(mdev, e);
}
/**
 * w_e_end_data_req() - Worker callback, to send a P_DATA_REPLY packet in response to a P_DATA_REQUEST
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		ok = drbd_send_block(mdev, P_DATA_REPLY, e);
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. sector=%llus.\n",
			    (unsigned long long)e->sector);

		ok = drbd_send_ack(mdev, P_NEG_DREPLY, e);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}
/**
 * w_e_end_rsdata_req() - Worker callback to send a P_RS_DATA_REPLY packet in response to a P_RS_DATA_REQUEST
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	int ok;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev_if_state(mdev, D_FAILED)) {
		drbd_rs_complete_io(mdev, e->sector);
		put_ldev(mdev);
	}

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		if (likely(mdev->state.pdsk >= D_INCONSISTENT)) {
			inc_rs_pending(mdev);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
		} else {
			if (__ratelimit(&drbd_ratelimit_state))
				dev_err(DEV, "Not sending RSDataReply, "
				    "partner DISKLESS!\n");
			ok = 1;
		}
	} else {
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegRSDReply. sector %llus.\n",
			    (unsigned long long)e->sector);

		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);

		/* update resync data with failure */
		drbd_rs_failed_io(mdev, e->sector, e->size);
	}

	dec_unacked(mdev);

	move_to_net_ee_or_free(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block() failed\n");
	return ok;
}
int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	struct digest_info *di;
	int digest_size;
	void *digest = NULL;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, e->sector);
		put_ldev(mdev);
	}

	di = e->digest;

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		/* quick hack to try to avoid a race against reconfiguration.
		 * a real fix would be much more involved,
		 * introducing more locking mechanisms */
		if (mdev->csums_tfm) {
			digest_size = crypto_hash_digestsize(mdev->csums_tfm);
			D_ASSERT(digest_size == di->digest_size);
			digest = kmalloc(digest_size, GFP_NOIO);
		}
		if (digest) {
			drbd_csum_ee(mdev, mdev->csums_tfm, e, digest);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}

		if (eq) {
			drbd_set_in_sync(mdev, e->sector, e->size);
			/* rs_same_csums unit is BM_BLOCK_SIZE */
			mdev->rs_same_csum += e->size >> BM_BLOCK_SHIFT;
			ok = drbd_send_ack(mdev, P_RS_IS_IN_SYNC, e);
		} else {
			inc_rs_pending(mdev);
			e->block_id = ID_SYNCER; /* By setting block_id, digest pointer becomes invalid! */
			e->flags &= ~EE_HAS_DIGEST; /* This e no longer has a digest pointer */
			kfree(di);
			ok = drbd_send_block(mdev, P_RS_DATA_REPLY, e);
		}
	} else {
		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
	}

	dec_unacked(mdev);
	move_to_net_ee_or_free(mdev, e);

	if (unlikely(!ok))
		dev_err(DEV, "drbd_send_block/ack() failed\n");
	return ok;
}
int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	int digest_size;
	void *digest;
	int ok = 1;

	if (unlikely(cancel))
		goto out;

	if (unlikely((e->flags & EE_WAS_ERROR) != 0))
		goto out;

	digest_size = crypto_hash_digestsize(mdev->verify_tfm);
	/* FIXME if this allocation fails, online verify will not terminate! */
	digest = kmalloc(digest_size, GFP_NOIO);
	if (digest) {
		drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);
		inc_rs_pending(mdev);
		ok = drbd_send_drequest_csum(mdev, e->sector, e->size,
					     digest, digest_size, P_OV_REPLY);
		if (!ok)
			dec_rs_pending(mdev);
		kfree(digest);
	}

out:
	drbd_free_ee(mdev, e);

	dec_unacked(mdev);

	return ok;
}
void drbd_ov_oos_found(struct drbd_conf *mdev, sector_t sector, int size)
{
	if (mdev->ov_last_oos_start + mdev->ov_last_oos_size == sector) {
		mdev->ov_last_oos_size += size>>9;
	} else {
		mdev->ov_last_oos_start = sector;
		mdev->ov_last_oos_size = size>>9;
	}
	drbd_set_out_of_sync(mdev, sector, size);
	set_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags);
}
int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
	struct digest_info *di;
	int digest_size;
	void *digest;
	int ok, eq = 0;

	if (unlikely(cancel)) {
		drbd_free_ee(mdev, e);
		dec_unacked(mdev);
		return 1;
	}

	/* after "cancel", because after drbd_disconnect/drbd_rs_cancel_all
	 * the resync lru has been cleaned up already */
	if (get_ldev(mdev)) {
		drbd_rs_complete_io(mdev, e->sector);
		put_ldev(mdev);
	}

	di = e->digest;

	if (likely((e->flags & EE_WAS_ERROR) == 0)) {
		digest_size = crypto_hash_digestsize(mdev->verify_tfm);
		digest = kmalloc(digest_size, GFP_NOIO);
		if (digest) {
			drbd_csum_ee(mdev, mdev->verify_tfm, e, digest);

			D_ASSERT(digest_size == di->digest_size);
			eq = !memcmp(digest, di->digest, digest_size);
			kfree(digest);
		}
	} else {
		ok = drbd_send_ack(mdev, P_NEG_RS_DREPLY, e);
		if (__ratelimit(&drbd_ratelimit_state))
			dev_err(DEV, "Sending NegDReply. I guess it gets messy.\n");
	}

	dec_unacked(mdev);

	if (!eq)
		drbd_ov_oos_found(mdev, e->sector, e->size);

	ok = drbd_send_ack_ex(mdev, P_OV_RESULT, e->sector, e->size,
			      eq ? ID_IN_SYNC : ID_OUT_OF_SYNC);

	drbd_free_ee(mdev, e);

	if (--mdev->ov_left == 0) {
		drbd_resync_finished(mdev);
	}

	return 1;
}
int w_prev_work_done(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_wq_barrier *b = container_of(w, struct drbd_wq_barrier, w);
	complete(&b->done);
	return 1;
}
int w_send_barrier(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_tl_epoch *b = container_of(w, struct drbd_tl_epoch, w);
	struct p_barrier *p = &mdev->data.sbuf.barrier;
	int ok = 1;

	/* really avoid racing with tl_clear.  w.cb may have been referenced
	 * just before it was reassigned and re-queued, so double check that.
	 * actually, this race was harmless, since we only try to send the
	 * barrier packet here, and otherwise do nothing with the object.
	 * but compare with the head of w_clear_epoch */
	spin_lock_irq(&mdev->req_lock);
	if (w->cb != w_send_barrier || mdev->state.conn < C_CONNECTED)
		cancel = 1;
	spin_unlock_irq(&mdev->req_lock);
	if (cancel)
		return 1;

	if (!drbd_get_data_sock(mdev))
		return 0;
	p->barrier = b->br_number;
	/* inc_ap_pending was done where this was queued.
	 * dec_ap_pending will be done in got_BarrierAck
	 * or (on connection loss) in w_clear_epoch.  */
	ok = _drbd_send_cmd(mdev, mdev->data.socket, P_BARRIER,
			(struct p_header80 *)p, sizeof(*p), 0);
	drbd_put_data_sock(mdev);

	return ok;
}
int w_send_write_hint(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	if (cancel)
		return 0;
	return drbd_send_short_cmd(mdev, P_UNPLUG_REMOTE);
}

/**
 * w_send_dblock() - Worker callback to send a P_DATA packet in order to mirror a write request
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_dblock(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, send_canceled);
		return 1;
	}

	ok = drbd_send_dblock(mdev, req);
	req_mod(req, ok ? handed_over_to_network : send_failed);

	return ok;
}
/**
 * w_send_read_req() - Worker callback to send a read request (P_DATA_REQUEST) packet
 * @mdev:	DRBD device.
 * @w:		work object.
 * @cancel:	The connection will be closed anyways
 */
int w_send_read_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);
	int ok;

	if (unlikely(cancel)) {
		req_mod(req, send_canceled);
		return 1;
	}

	ok = drbd_send_drequest(mdev, P_DATA_REQUEST, req->sector, req->size,
				(unsigned long)req);

	if (!ok) {
		/* ?? we set C_TIMEOUT or C_BROKEN_PIPE in drbd_send();
		 * so this is probably redundant */
		if (mdev->state.conn >= C_CONNECTED)
			drbd_force_state(mdev, NS(conn, C_NETWORK_FAILURE));
	}
	req_mod(req, ok ? handed_over_to_network : send_failed);

	return ok;
}
int w_restart_disk_io(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
	struct drbd_request *req = container_of(w, struct drbd_request, w);

	if (bio_data_dir(req->master_bio) == WRITE && req->rq_state & RQ_IN_ACT_LOG)
		drbd_al_begin_io(mdev, req->sector);
	/* Calling drbd_al_begin_io() out of the worker might deadlock
	   theoretically. Practically it can not deadlock, since this is
	   only used when unfreezing IOs. All the extents of the requests
	   that made it into the TL are already active */

	drbd_req_make_private_bio(req, req->master_bio);
	req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
	generic_make_request(req->private_bio);

	return 1;
}
static int _drbd_may_sync_now(struct drbd_conf *mdev)
{
	struct drbd_conf *odev = mdev;

	while (1) {
		if (odev->sync_conf.after == -1)
			return 1;
		odev = minor_to_mdev(odev->sync_conf.after);
		ERR_IF(!odev) return 1;
		if ((odev->state.conn >= C_SYNC_SOURCE &&
		     odev->state.conn <= C_PAUSED_SYNC_T) ||
		    odev->state.aftr_isp || odev->state.peer_isp ||
		    odev->state.user_isp)
			return 0;
	}
}
/**
 * _drbd_pause_after() - Pause resync on all devices that may not resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and after_state_ch).
 */
static int _drbd_pause_after(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (!_drbd_may_sync_now(odev))
			rv |= (__drbd_set_state(_NS(odev, aftr_isp, 1), CS_HARD, NULL)
			       != SS_NOTHING_TO_DO);
	}

	return rv;
}
/**
 * _drbd_resume_next() - Resume resync on all devices that may resync now
 * @mdev:	DRBD device.
 *
 * Called from process context only (admin command and worker).
 */
static int _drbd_resume_next(struct drbd_conf *mdev)
{
	struct drbd_conf *odev;
	int i, rv = 0;

	for (i = 0; i < minor_count; i++) {
		odev = minor_to_mdev(i);
		if (!odev)
			continue;
		if (odev->state.conn == C_STANDALONE && odev->state.disk == D_DISKLESS)
			continue;
		if (odev->state.aftr_isp) {
			if (_drbd_may_sync_now(odev))
				rv |= (__drbd_set_state(_NS(odev, aftr_isp, 0),
							CS_HARD, NULL)
				       != SS_NOTHING_TO_DO);
		}
	}

	return rv;
}
*mdev
)
1373 write_lock_irq(&global_state_lock
);
1374 _drbd_resume_next(mdev
);
1375 write_unlock_irq(&global_state_lock
);
1378 void suspend_other_sg(struct drbd_conf
*mdev
)
1380 write_lock_irq(&global_state_lock
);
1381 _drbd_pause_after(mdev
);
1382 write_unlock_irq(&global_state_lock
);
static int sync_after_error(struct drbd_conf *mdev, int o_minor)
{
	struct drbd_conf *odev;

	if (o_minor == -1)
		return NO_ERROR;
	if (o_minor < -1 || minor_to_mdev(o_minor) == NULL)
		return ERR_SYNC_AFTER;

	/* check for loops */
	odev = minor_to_mdev(o_minor);
	while (1) {
		if (odev == mdev)
			return ERR_SYNC_AFTER_CYCLE;

		/* dependency chain ends here, no cycles. */
		if (odev->sync_conf.after == -1)
			return NO_ERROR;

		/* follow the dependency chain */
		odev = minor_to_mdev(odev->sync_conf.after);
	}
}
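
/* Example of the sync-after dependency this checks, with hypothetical
 * minors for illustration: if device minor 2 is configured with
 * sync_conf.after == 0 and minor 0 has after == -1, then minor 2 may only
 * resync while minor 0 is not itself resyncing (see _drbd_may_sync_now);
 * additionally pointing minor 0's "after" at minor 2 would close a loop
 * and be rejected here with ERR_SYNC_AFTER_CYCLE. */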
int drbd_alter_sa(struct drbd_conf *mdev, int na)
{
	int changes;
	int retcode;

	write_lock_irq(&global_state_lock);
	retcode = sync_after_error(mdev, na);
	if (retcode == NO_ERROR) {
		mdev->sync_conf.after = na;
		do {
			changes  = _drbd_pause_after(mdev);
			changes |= _drbd_resume_next(mdev);
		} while (changes);
	}
	write_unlock_irq(&global_state_lock);
	return retcode;
}
/**
 * drbd_start_resync() - Start the resync process
 * @mdev:	DRBD device.
 * @side:	Either C_SYNC_SOURCE or C_SYNC_TARGET
 *
 * This function might bring you directly into one of the
 * C_PAUSED_SYNC_* states.
 */
void drbd_start_resync(struct drbd_conf *mdev, enum drbd_conns side)
{
	union drbd_state ns;
	int r;

	if (mdev->state.conn >= C_SYNC_SOURCE) {
		dev_err(DEV, "Resync already running!\n");
		return;
	}

	/* In case a previous resync run was aborted by an IO error/detach on the peer. */
	drbd_rs_cancel_all(mdev);

	if (side == C_SYNC_TARGET) {
		/* Since application IO was locked out during C_WF_BITMAP_T and
		   C_WF_SYNC_UUID we are still unmodified. Before going to C_SYNC_TARGET
		   we check that we might make the data inconsistent. */
		r = drbd_khelper(mdev, "before-resync-target");
		r = (r >> 8) & 0xff;
		if (r > 0) {
			dev_info(DEV, "before-resync-target handler returned %d, "
			     "dropping connection.\n", r);
			drbd_force_state(mdev, NS(conn, C_DISCONNECTING));
			return;
		}
	}

	drbd_state_lock(mdev);

	if (!get_ldev_if_state(mdev, D_NEGOTIATING)) {
		drbd_state_unlock(mdev);
		return;
	}

	if (side == C_SYNC_TARGET) {
		mdev->bm_resync_fo = 0;
	} else /* side == C_SYNC_SOURCE */ {
		u64 uuid;

		get_random_bytes(&uuid, sizeof(u64));
		drbd_uuid_set(mdev, UI_BITMAP, uuid);
		drbd_send_sync_uuid(mdev, uuid);

		D_ASSERT(mdev->state.disk == D_UP_TO_DATE);
	}

	write_lock_irq(&global_state_lock);
	ns = mdev->state;

	ns.aftr_isp = !_drbd_may_sync_now(mdev);

	ns.conn = side;

	if (side == C_SYNC_TARGET)
		ns.disk = D_INCONSISTENT;
	else /* side == C_SYNC_SOURCE */
		ns.pdsk = D_INCONSISTENT;

	r = __drbd_set_state(mdev, ns, CS_VERBOSE, NULL);
	ns = mdev->state;

	if (ns.conn < C_CONNECTED)
		r = SS_UNKNOWN_ERROR;
	if (r == SS_SUCCESS) {
		unsigned long tw = drbd_bm_total_weight(mdev);
		unsigned long now = jiffies;
		int i;

		mdev->rs_failed    = 0;
		mdev->rs_paused    = 0;
		mdev->rs_same_csum = 0;
		mdev->rs_last_events = 0;
		mdev->rs_last_sect_ev = 0;
		mdev->rs_total     = tw;
		mdev->rs_start     = now;
		for (i = 0; i < DRBD_SYNC_MARKS; i++) {
			mdev->rs_mark_left[i] = tw;
			mdev->rs_mark_time[i] = now;
		}
		_drbd_pause_after(mdev);
	}
	write_unlock_irq(&global_state_lock);
	put_ldev(mdev);

	if (r == SS_SUCCESS) {
		dev_info(DEV, "Began resync as %s (will sync %lu KB [%lu bits set]).\n",
		     drbd_conn_str(ns.conn),
		     (unsigned long) mdev->rs_total << (BM_BLOCK_SHIFT-10),
		     (unsigned long) mdev->rs_total);

		if (mdev->agreed_pro_version < 95 && mdev->rs_total == 0) {
			/* This still has a race (about when exactly the peers
			 * detect connection loss) that can lead to a full sync
			 * on next handshake. In 8.3.9 we fixed this with explicit
			 * resync-finished notifications, but the fix
			 * introduces a protocol change.  Sleeping for some
			 * time longer than the ping interval + timeout on the
			 * SyncSource, to give the SyncTarget the chance to
			 * detect connection loss, then waiting for a ping
			 * response (implicit in drbd_resync_finished) reduces
			 * the race considerably, but does not solve it. */
			if (side == C_SYNC_SOURCE)
				schedule_timeout_interruptible(
					mdev->net_conf->ping_int * HZ +
					mdev->net_conf->ping_timeo*HZ/9);
			drbd_resync_finished(mdev);
		}

		atomic_set(&mdev->rs_sect_in, 0);
		atomic_set(&mdev->rs_sect_ev, 0);
		mdev->rs_in_flight = 0;
		mdev->rs_planed = 0;
		spin_lock(&mdev->peer_seq_lock);
		fifo_set(&mdev->rs_plan_s, 0);
		spin_unlock(&mdev->peer_seq_lock);
		/* ns.conn may already be != mdev->state.conn,
		 * we may have been paused in between, or become paused until
		 * the timer triggers.
		 * No matter, that is handled in resync_timer_fn() */
		if (ns.conn == C_SYNC_TARGET)
			mod_timer(&mdev->resync_timer, jiffies);
	}
	drbd_state_unlock(mdev);
}
int drbd_worker(struct drbd_thread *thi)
{
	struct drbd_conf *mdev = thi->mdev;
	struct drbd_work *w = NULL;
	LIST_HEAD(work_list);
	int intr = 0, i = 0;

	sprintf(current->comm, "drbd%d_worker", mdev_to_minor(mdev));

	while (get_t_state(thi) == Running) {
		drbd_thread_current_set_cpu(mdev);

		if (down_trylock(&mdev->data.work.s)) {
			mutex_lock(&mdev->data.mutex);
			if (mdev->data.socket && !mdev->net_conf->no_cork)
				drbd_tcp_uncork(mdev->data.socket);
			mutex_unlock(&mdev->data.mutex);

			intr = down_interruptible(&mdev->data.work.s);

			mutex_lock(&mdev->data.mutex);
			if (mdev->data.socket && !mdev->net_conf->no_cork)
				drbd_tcp_cork(mdev->data.socket);
			mutex_unlock(&mdev->data.mutex);
		}

		if (intr) {
			D_ASSERT(intr == -EINTR);
			flush_signals(current);
			ERR_IF (get_t_state(thi) == Running)
				continue;
			break;
		}

		if (get_t_state(thi) != Running)
			break;
		/* With this break, we have done a down() but not consumed
		   the entry from the list. The cleanup code takes care of
		   this...   */

		w = NULL;
		spin_lock_irq(&mdev->data.work.q_lock);
		ERR_IF(list_empty(&mdev->data.work.q)) {
			/* something terribly wrong in our logic.
			 * we were able to down() the semaphore,
			 * but the list is empty... doh.
			 *
			 * what is the best thing to do now?
			 * try again from scratch, restarting the receiver,
			 * asender, whatnot? could break even more ugly,
			 * e.g. when we are primary, but no good local data.
			 *
			 * I'll try to get away just starting over this loop.
			 */
			spin_unlock_irq(&mdev->data.work.q_lock);
			continue;
		}
		w = list_entry(mdev->data.work.q.next, struct drbd_work, list);
		list_del_init(&w->list);
		spin_unlock_irq(&mdev->data.work.q_lock);

		if (!w->cb(mdev, w, mdev->state.conn < C_CONNECTED)) {
			/* dev_warn(DEV, "worker: a callback failed! \n"); */
			if (mdev->state.conn >= C_CONNECTED)
				drbd_force_state(mdev,
						NS(conn, C_NETWORK_FAILURE));
		}
	}
	D_ASSERT(test_bit(DEVICE_DYING, &mdev->flags));
	D_ASSERT(test_bit(CONFIG_PENDING, &mdev->flags));

	spin_lock_irq(&mdev->data.work.q_lock);
	while (!list_empty(&mdev->data.work.q)) {
		list_splice_init(&mdev->data.work.q, &work_list);
		spin_unlock_irq(&mdev->data.work.q_lock);

		while (!list_empty(&work_list)) {
			w = list_entry(work_list.next, struct drbd_work, list);
			list_del_init(&w->list);
			w->cb(mdev, w, 1); /* cancel remaining queued work */
			i++; /* dead debugging code */
		}

		spin_lock_irq(&mdev->data.work.q_lock);
	}
	sema_init(&mdev->data.work.s, 0);
	/* DANGEROUS race: if someone did queue his work within the spinlock,
	 * but up() ed outside the spinlock, we could get an up() on the
	 * semaphore without corresponding list entry.
	 */
	spin_unlock_irq(&mdev->data.work.q_lock);

	D_ASSERT(mdev->state.disk == D_DISKLESS && mdev->state.conn == C_STANDALONE);
	/* _drbd_set_state only uses stop_nowait.
	 * wait here for the Exiting receiver. */
	drbd_thread_stop(&mdev->receiver);
	drbd_mdev_cleanup(mdev);

	dev_info(DEV, "worker terminated\n");

	clear_bit(DEVICE_DYING, &mdev->flags);
	clear_bit(CONFIG_PENDING, &mdev->flags);
	wake_up(&mdev->state_wait);

	return 0;
}