device: reuse HTTP connections for MKCOL
MogileFS-Server: lib/MogileFS/Worker/Monitor.pm
package MogileFS::Worker::Monitor;
use strict;
use warnings;

use base 'MogileFS::Worker';
use fields (
    'last_test_write',    # devid -> time. time we last tried writing to a device.
    'monitor_start',      # main monitor start time
    'skip_host',          # hostid -> 1 if already noted dead (reset every loop)
    'seen_hosts',         # IP -> 1 (reset every loop)
    'iow',                # MogileFS::IOStatWatcher object
    'prev_data',          # DB data from previous run
    'devutil',            # Running tally of device utilization
    'events',             # Queue of state events
    'refresh_state',      # devid -> { used, total, callbacks }, temporary data in each refresh run
    'have_masterdb',      # Hint flag for whether the master DB is available
    'updateable_devices', # devid -> Device, avoids device table updates
    'parent',             # socketpair to parent process
    'refresh_pending',    # set if there was a manually-requested refresh
    'db_monitor_ran',     # We announce "monitor_just_ran" every time the
                          # device checks are run, but only if the DB has
                          # been checked in between.
);

use Danga::Socket 1.56;
use MogileFS::Config;
use MogileFS::Util qw(error debug encode_url_args apply_state_events_list);
use MogileFS::IOStatWatcher;
use MogileFS::Server;
use MogileFS::Connection::Parent;
use Digest::MD5 qw(md5_base64);

use constant UPDATE_DB_EVERY => 15;
sub new {
    my ($class, $psock) = @_;
    my $self = fields::new($class);
    $self->SUPER::new($psock);

    $self->{last_test_write} = {};
    $self->{iow}             = MogileFS::IOStatWatcher->new;
    $self->{prev_data}       = { domain => {}, class => {}, host => {},
                                 device => {} };
    $self->{devutil}         = { cur => {}, prev => {}, tmp => {} };
    $self->{events}          = [];
    $self->{have_masterdb}   = 0;
    return $self;
}

sub watchdog_timeout {
    30;
}
# returns 1 if a DB update was attempted
# returns 0 immediately if the (device) monitor is already running
sub cache_refresh {
    my $self = shift;

    if ($self->{refresh_state}) {
        debug("Monitor run in progress, will not check for DB updates");
        return 0;
    }

    debug("Monitor running; checking DB for updates");
    # "Fix" our local cache of this flag, so we always check the master DB.
    MogileFS::Config->cache_server_setting('_master_db_alive', 1);
    my $have_dbh = $self->validate_dbh;
    if ($have_dbh && !$self->{have_masterdb}) {
        $self->{have_masterdb} = 1;
        $self->set_event('srvset', '_master_db_alive', { value => 1 });
    } elsif (!$have_dbh) {
        $self->{have_masterdb} = 0;
        $self->set_event('srvset', '_master_db_alive', { value => 0 });
        error("Cannot connect to master database!");
    }

    if ($have_dbh) {
        my $db_data = $self->grab_all_data;

        # Stack diffs to ship back later
        $self->diff_data($db_data);
    }

    $self->send_events_to_parent;
    $self->{db_monitor_ran} = 1;

    return 1;
}
sub usage_refresh {
    my ($self) = @_;

    # prevent concurrent refresh
    return if $self->{refresh_state};

    debug("Monitor running; scanning usage files");

    $self->{refresh_state} = {}; # devid -> ...
    $self->{monitor_start} = Time::HiRes::time();

    my $have_dbh = $self->validate_dbh;

    # See if we should be allowed to update the device table rows.
    if ($have_dbh && Mgd::get_store()->get_lock('mgfs:device_update', 0)) {
        # Fetch the freshest list of entries, to avoid excessive writes.
        $self->{updateable_devices} = { map { $_->{devid} => $_ }
                                        Mgd::get_store()->get_all_devices };
    } else {
        $self->{updateable_devices} = undef;
    }

    $self->{skip_host}  = {}; # hostid -> 1 if already noted dead.
    $self->{seen_hosts} = {}; # IP -> 1

    my $dev_factory = MogileFS::Factory::Device->get_factory();
    my $devutil = $self->{devutil};

    $devutil->{tmp} = {};
    # kick off check_device to test hosts/devs. diff against old values.
    for my $dev ($dev_factory->get_all) {
        if (my $state = $self->is_iow_diff($dev)) {
            $self->state_event('device', $dev->id, {utilization => $state});
        }
        $devutil->{tmp}->{$dev->id} = $devutil->{cur}->{$dev->id};

        $dev->can_read_from or next;
        $self->check_device_begin($dev);
    }
    # we're done if we didn't schedule any work
    $self->usage_refresh_done unless keys %{$self->{refresh_state}};
}
sub usage_refresh_done {
    my ($self) = @_;

    if ($self->{updateable_devices}) {
        Mgd::get_store()->release_lock('mgfs:device_update');
        $self->{updateable_devices} = undef;
    }

    $self->{devutil}->{prev} = $self->{devutil}->{tmp};
    # Set the IOWatcher hosts (once old monitor code has been disabled)

    $self->send_events_to_parent;

    $self->{iow}->set_hosts(keys %{$self->{seen_hosts}});

    foreach my $devid (keys %{$self->{refresh_state}}) {
        error("device check incomplete for dev$devid");
    }

    my $start = delete $self->{monitor_start};
    my $elapsed = Time::HiRes::time() - $start;
    debug("device refresh finished after $elapsed");

    $self->{refresh_state} = undef;
    my $pending_since = $self->{refresh_pending};

    # Schedule another usage_refresh immediately if somebody requested it.
    # Don't announce :monitor_just_ran if somebody requested a refresh
    # while we were running; we could've been refreshing against a stale DB.
    if ($pending_since && $pending_since > $start) {
        # use AddTimer to schedule the refresh and avoid stack overflow,
        # since usage_refresh can call usage_refresh_done directly if
        # there are no devices
        Danga::Socket->AddTimer(0, sub {
            $self->cache_refresh;
            $self->usage_refresh;
        });
    }

    # announce we're done if we ran on schedule, or we had a
    # forced refresh that was requested before we started.
    if (!$pending_since || $pending_since <= $start) {
        # totally done refreshing, accept manual refresh requests again
        $self->{parent}->watch_read(1);
        delete $self->{refresh_pending};
        if (delete $self->{db_monitor_ran} || $pending_since) {
            $self->send_to_parent(":monitor_just_ran");
        }
    }
}
sub work {
    my $self = shift;

    # It makes sense to have the monitor use a shorter timeout
    # (conn_timeout) across the board to skip slow hosts. Other workers
    # are less tolerant, and may use a higher value in node_timeout.
    MogileFS::Config->set_config_no_broadcast("node_timeout", MogileFS::Config->config("conn_timeout"));

    my $iow = $self->{iow};
    $iow->on_stats(sub {
        my ($hostname, $stats) = @_;

        while (my ($devid, $util) = each %$stats) {
            # Let's not propagate devices that we accidentally find.
            my $dev = Mgd::device_factory()->get_by_id($devid);
            next unless $dev;

            $self->{devutil}->{cur}->{$devid} = $util;
        }
    });

    my $db_monitor;
    $db_monitor = sub {
        $self->still_alive;

        # reschedule immediately if we were blocked by main_monitor;
        # setting refresh_pending will make cache_refresh run again
        if (!$self->cache_refresh) {
            $self->{refresh_pending} ||= Time::HiRes::time();
        }

        # always reschedule in 4 seconds, regardless
        Danga::Socket->AddTimer(4, $db_monitor);
    };

    $db_monitor->();
    $self->read_from_parent;

    my $main_monitor;
    $main_monitor = sub {
        $self->{parent}->ping;
        $self->usage_refresh;
        Danga::Socket->AddTimer(2.5, $main_monitor);
    };

    $self->parent_ping; # ensure we get the initial DB state back
    $self->{parent} = MogileFS::Connection::Parent->new($self);
    Danga::Socket->AddTimer(0, $main_monitor);
    Danga::Socket->EventLoop;
}
sub process_line {
    my MogileFS::Worker::Monitor $self = shift;
    my $lineref = shift;
    if ($$lineref =~ /^:refresh_monitor$/) {
        if ($self->cache_refresh) {
            $self->usage_refresh;
        } else {
            $self->{refresh_pending} ||= Time::HiRes::time();
        }
        # try to stop processing further refresh_monitor requests
        # if we're acting on a manual refresh
        $self->{parent}->watch_read(0);
        return 1;
    }
    return 0;
}
# --------------------------------------------------------------------------

# Flattens and ships events up to the parent. Can be huge on startup!
# Events:   set      type foo=bar&baz=quux
#           remove   type id
#           setstate type id foo=bar&baz=quux
# Combined: ev_mode=set&ev_type=device&foo=bar
#           ev_mode=setstate&ev_type=device&ev_id=1&foo=bar
sub send_events_to_parent {
    my $self = shift;
    my @flat = ();
    for my $ev (@{$self->{events}}) {
        my ($mode, $type, $args) = @$ev;
        $args->{ev_mode} = $mode;
        $args->{ev_type} = $type;
        push(@flat, encode_url_args($args));
    }
    return unless @flat;
    $self->{events} = [];

    {
        # $events can be several MB, so let it go out-of-scope soon:
        my $events = join(' ', ':monitor_events', @flat);
        debug("sending state changes $events", 2);
        $self->send_to_parent($events);
    }

    apply_state_events_list(@flat);
}
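# An illustrative flattened payload (not captured from a real run): two queued
# events such as
#   ['set',      'domain', { ev_id => 1,  dmid => 1, namespace => 'testdom' }]
#   ['setstate', 'device', { ev_id => 12, utilization => '35.2' }]
# go to the parent as a single line, roughly:
#   :monitor_events ev_mode=set&ev_type=domain&ev_id=1&dmid=1&namespace=testdom ev_mode=setstate&ev_type=device&ev_id=12&utilization=35.2
# (key order within each chunk depends on encode_url_args / hash ordering).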
sub add_event {
    push(@{$_[0]->{events}}, $_[1]);
}

sub set_event {
    # Allow callers to use shorthand
    $_[3]->{ev_id} = $_[2];
    $_[0]->add_event(['set', $_[1], $_[3]]);
}
sub remove_event { $_[0]->add_event(['remove', $_[1], { ev_id => $_[2] }]); }
sub state_event {
    $_[3]->{ev_id} = $_[2];
    $_[0]->add_event(['setstate', $_[1], $_[3]]);
}
sub is_iow_diff {
    my ($self, $dev) = @_;
    my $devid = $dev->id;
    my $p = $self->{devutil}->{prev}->{$devid};
    my $c = $self->{devutil}->{cur}->{$devid};
    if ( ! defined $p || $p ne $c ) {
        return $c;
    }
    return undef;
}
sub diff_data {
    my ($self, $db_data) = @_;

    my $new_data  = {};
    my $prev_data = $self->{prev_data};
    for my $type (keys %{$db_data}) {
        my $d_data = $db_data->{$type};
        my $p_data = $prev_data->{$type};
        my $n_data = {};

        for my $item (@{$d_data}) {
            my $id = $type eq 'domain' ? $item->{dmid}
                   : $type eq 'class'  ? $item->{dmid} . '-' . $item->{classid}
                   : $type eq 'host'   ? $item->{hostid}
                   : $type eq 'device' ? $item->{devid}
                   : $type eq 'srvset' ? $item->{field}
                   : die "Unknown type";
            my $old = delete $p_data->{$id};
            # Special case: for devices, we don't care if mb_asof changes.
            # FIXME: Change the grab routine (or filter there?).
            delete $item->{mb_asof} if $type eq 'device';
            if (!$old || $self->diff_hash($old, $item)) {
                $self->set_event($type, $id, { %$item });
            }
            $n_data->{$id} = $item;
        }
        for my $id (keys %{$p_data}) {
            $self->remove_event($type, $id);
        }

        $new_data->{$type} = $n_data;
    }
    $self->{prev_data} = $new_data;
}
# returns 1 if the hashes are different.
sub diff_hash {
    my ($self, $old, $new) = @_;

    my %keys = ();
    map { $keys{$_}++ } keys %$old, keys %$new;
    for my $k (keys %keys) {
        return 1 if (exists $old->{$k} && ! exists $new->{$k});
        return 1 if (exists $new->{$k} && ! exists $old->{$k});
        return 1 if (defined $old->{$k} && ! defined $new->{$k});
        return 1 if (defined $new->{$k} && ! defined $old->{$k});
        next     if (! defined $new->{$k} && ! defined $old->{$k});
        return 1 if ($old->{$k} ne $new->{$k});
    }
    return 0;
}
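# A quick sketch of how the comparison above behaves (illustrative values,
# not taken from real DB rows):
#
#   $self->diff_hash({ status => 'alive', weight => 100 }, { status => 'alive' });  # 1: key disappeared
#   $self->diff_hash({ weight => 100 },   { weight => undef });                     # 1: defined-ness changed
#   $self->diff_hash({ weight => undef }, { weight => undef });                     # 0: both undef is not a diff
#   $self->diff_hash({ weight => 100 },   { weight => 100 });                       # 0: identical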
sub grab_all_data {
    my $self = shift;
    my $sto  = Mgd::get_store();

    # Normalize the domain data to match the rest, to simplify the differ.
    # FIXME: Once new objects are swapped in, fix the original
    my %dom = $sto->get_all_domains;
    my @fixed_dom = ();
    while (my ($name, $id) = each %dom) {
        push(@fixed_dom, { namespace => $name, dmid => $id });
    }

    my $set = $sto->server_settings;
    my @fixed_set = ();
    while (my ($field, $value) = each %$set) {
        push(@fixed_set, { field => $field, value => $value });
    }

    my %ret = ( domain => \@fixed_dom,
                class  => [$sto->get_all_classes],
                host   => [$sto->get_all_hosts],
                device => [$sto->get_all_devices],
                srvset => \@fixed_set, );
    return \%ret;
}
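# Shape of the returned structure, for reference (values illustrative only);
# every type maps to an arrayref of plain hashrefs so diff_data() can treat
# them uniformly:
#
#   {
#       domain => [ { dmid => 1, namespace => 'testdom' }, ... ],
#       srvset => [ { field => 'schema_version', value => '15' }, ... ],
#       class  => [ ... ], host => [ ... ], device => [ ... ],
#   }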
# returns true on success, false on failure
sub check_usage_response {
    my ($self, $dev, $response) = @_;
    my $devid = $dev->id;

    my %stats;
    my $data = $response->content;
    foreach (split(/\r?\n/, $data)) {
        next unless /^(\w+)\s*:\s*(.+)$/;
        $stats{$1} = $2;
    }

    my ($used, $total) = ($stats{used}, $stats{total});
    unless ($used && $total) {
        $used  = "<undef>" unless defined $used;
        $total = "<undef>" unless defined $total;
        my $clen = length($data || "");
        error("dev$devid reports used = $used, total = $total, content-length: $clen, error?");
        return 0;
    }

    my $rstate = $self->{refresh_state}->{$devid};
    ($rstate->{used}, $rstate->{total}) = ($used, $total);

    # only update the database every ~15 seconds per device
    if ($self->{updateable_devices}) {
        my $devrow = $self->{updateable_devices}->{$devid};
        my $last = ($devrow && $devrow->{mb_asof}) ? $devrow->{mb_asof} : 0;
        if ($last + UPDATE_DB_EVERY < time()) {
            Mgd::get_store()->update_device_usage(mb_total => int($total / 1024),
                                                  mb_used  => int($used / 1024),
                                                  devid    => $devid);
        }
    }
    return 1;
}
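# A sketch of the usage body this parser expects (numbers are illustrative,
# not from a real device).  Only the "used" and "total" keys matter here, in
# 1KiB blocks given the /1024 conversion to megabytes above; any line not
# matching /^(\w+)\s*:\s*(.+)$/ is ignored:
#
#   used: 1442818156
#   total: 1969662580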
sub dev_debug {
    my ($self, $dev, $writable) = @_;
    return unless $Mgd::DEBUG >= 1;
    my $devid = $dev->id;
    my $rstate = $self->{refresh_state}->{$devid};
    my ($used, $total) = ($rstate->{used}, $rstate->{total});

    debug("dev$devid: used = $used, total = $total, writeable = $writable");
}
sub check_write {
    my ($self, $dev) = @_;
    my $rstate = $self->{refresh_state}->{$dev->id};
    my $test_write = $rstate->{test_write};

    if (!$test_write || $test_write->{tries} > 0) {
        # this was "$$-$now" before, but we don't yet have a cleaner in
        # mogstored for these files
        my $num = int(rand 100);
        $test_write = $rstate->{test_write} ||= {};
        $test_write->{path} = "/dev${\$dev->id}/test-write/test-write-$num";
        $test_write->{content} = "time=" . time . " rand=$num";
        $test_write->{tries} ||= 2;
    }
    $test_write->{tries}--;

    my $opts = { content => $test_write->{content} };
    $dev->host->http("PUT", $test_write->{path}, $opts, sub {
        my ($response) = @_;
        $self->on_check_write_response($dev, $response);
    });
}
# starts the lengthy device check process
sub check_device_begin {
    my ($self, $dev) = @_;
    $self->{refresh_state}->{$dev->id} = {};

    $self->check_device($dev);
}
# the lengthy device check process
sub check_device {
    my ($self, $dev) = @_;
    return $self->check_device_done($dev) if $self->{skip_host}{$dev->hostid};

    my $devid = $dev->id;
    my $url = $dev->usage_url;
    my $host = $dev->host;

    $self->{seen_hosts}{$host->ip} = 1;

    # now try to get the data with a short timeout
    my $start_time = Time::HiRes::time();
    $host->http_get("GET", $dev->usage_url, undef, sub {
        my ($response) = @_;
        if (!$self->on_usage_response($dev, $response, $start_time)) {
            return $self->check_device_done($dev);
        }

        # skip the test write if we're not due to try it yet
        my $now = time();
        if (($self->{last_test_write}{$devid} || 0) + UPDATE_DB_EVERY > $now) {
            return $self->check_device_done($dev);
        }
        $self->{last_test_write}{$devid} = $now;

        unless ($dev->can_delete_from) {
            # don't try to write to read-only devices; the filesystem may
            # be mounted read-only.
            return $self->dev_observed_readonly($dev);
        }

        # now we want to check whether this device is writeable

        # first, create the test-write directory. this will return
        # immediately after the first time, as the 'create_directory'
        # function caches what it's already created.
        $dev->create_directory("/dev$devid/test-write", sub {
            $self->check_write($dev);
        });
    });
}
# called on a successful PUT; ensure the data we get back is what we uploaded
sub check_reread {
    my ($self, $dev) = @_;
    # now let's get it back to verify; note we use the get_port to
    # verify that the distinction works (if we have one)
    my $test_write = $self->{refresh_state}->{$dev->id}->{test_write};
    $dev->host->http_get("GET", $test_write->{path}, undef, sub {
        my ($response) = @_;
        $self->on_check_reread_response($dev, $response);
    });
}
sub on_check_reread_response {
    my ($self, $dev, $response) = @_;
    my $test_write = $self->{refresh_state}->{$dev->id}->{test_write};

    # if the GET succeeded and the content matches, mark the device writeable
    if ($response->is_success) {
        if ($response->content eq $test_write->{content}) {
            if (!$dev->observed_writeable) {
                my $event = { observed_state => 'writeable' };
                $self->state_event('device', $dev->id, $event);
            }
            $self->dev_debug($dev, 1);

            return $self->check_bogus_md5($dev); # on to the final check...
        }

        # content didn't match due to a race; retry and hope we're lucky
        return $self->check_write($dev) if ($test_write->{tries} > 0);
    }

    return $self->dev_observed_readonly($dev); # it's read-only at least
}
sub on_check_write_response {
    my ($self, $dev, $response) = @_;
    return $self->check_reread($dev) if $response->is_success;
    return $self->dev_observed_readonly($dev);
}
# returns true on success, false on failure
sub on_usage_response {
    my ($self, $dev, $response, $start_time) = @_;
    my $host = $dev->host;
    my $hostip = $host->ip;

    if ($response->is_success) {
        # at this point we can reach the host
        if (!$host->observed_reachable) {
            my $event = { observed_state => 'reachable' };
            $self->state_event('host', $dev->hostid, $event);
        }
        $self->{iow}->restart_monitoring_if_needed($hostip);

        return $self->check_usage_response($dev, $response);
    }

    my $url = $dev->usage_url;
    my $failed_after = Time::HiRes::time() - $start_time;
    if ($failed_after < 0.5) {
        # fast failure: the port is probably not listening at all
        if (!$dev->observed_unreachable) {
            my $event = { observed_state => 'unreachable' };
            $self->state_event('device', $dev->id, $event);
        }
        my $get_port = $host->http_get_port;
        error("Port $get_port not listening on $hostip ($url)? Error was: " . $response->status_line);
    } else {
        # slow failure: treat the whole host as timed out and skip it this loop
        $failed_after = sprintf("%.02f", $failed_after);
        if (!$host->observed_unreachable) {
            my $event = { observed_state => 'unreachable' };
            $self->state_event('host', $dev->hostid, $event);
        }
        $self->{skip_host}{$dev->hostid} = 1;

        my $timeout = MogileFS->config("node_timeout");
        my $devid = $dev->id;
        error("Timeout contacting $hostip dev $devid ($url): took $failed_after seconds out of $timeout allowed");
    }
    return 0; # failure
}
sub check_bogus_md5 {
    my ($self, $dev) = @_;
    my $put_path = "/dev${\$dev->id}/test-write/test-md5";
    my $opts = {
        headers => { "Content-MD5" => md5_base64("!") . "==", },
        content => '.',
    };

    # success is bad here: it means the server doesn't understand how to
    # verify and reject corrupt bodies from Content-MD5 headers.
    # most servers /will/ succeed here :<
    $dev->host->http("PUT", $put_path, $opts, sub {
        my ($response) = @_;
        $self->on_bogus_md5_response($dev, $response);
    });
}
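# Why the probe above is deliberately broken: the Content-MD5 header carries
# the digest of "!" while the body is ".", so a server that actually verifies
# Content-MD5 must fail this PUT.  A rough by-hand equivalent (hypothetical
# device path and port, for illustration only):
#
#   use Digest::MD5 qw(md5_base64);
#   my $bogus = md5_base64("!") . "==";   # well-formed digest of the wrong bytes
#   # PUT http://$host:$port/dev1/test-write/test-md5 with body "." and
#   # header "Content-MD5: $bogus"
#   #   2xx response   => header ignored   => reject_bad_md5 = 0
#   #   error response => header enforced  => reject_bad_md5 = 1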
sub on_bogus_md5_response {
    my ($self, $dev, $response) = @_;
    my $rej = $response->is_success ? 0 : 1;
    my $prev = $dev->reject_bad_md5;

    if (!defined($prev) || $prev != $rej) {
        debug("dev${\$dev->id}: reject_bad_md5 = $rej");
        $self->state_event('device', $dev->id, { reject_bad_md5 => $rej });
    }
    return $self->check_device_done($dev);
}
# if we fall through to here, we know something is not quite right, so mark
# the device as merely readable; that much is guaranteed, since we got far
# enough to test writeability at all
sub dev_observed_readonly {
    my ($self, $dev) = @_;

    if (!$dev->observed_readable) {
        my $event = { observed_state => 'readable' };
        $self->state_event('device', $dev->id, $event);
    }
    $self->dev_debug($dev, 0);
    return $self->check_device_done($dev);
}
# called when all checks are done for a particular device
sub check_device_done {
    my ($self, $dev) = @_;

    $self->still_alive; # Ping the parent if needed so we don't time out
                        # when there are lots of devices.
    delete $self->{refresh_state}->{$dev->id};

    # if refresh_state is totally empty, we're done
    if ((scalar keys %{$self->{refresh_state}}) == 0) {
        $self->usage_refresh_done;
    }
}

1;
# Local Variables:
# mode: perl
# c-basic-indent: 4
# indent-tabs-mode: nil
# End: