Bug 470455 - test_database_sync_embed_visits.js leaks, r=sdwilsh
[wine-gecko.git] / tools / page-loader / dump.pl
blob1c4aebc7bcbb65bd2504dde63dc7ef9f38355e2b
1 #!/usr/bin/perl
2 #
3 # ***** BEGIN LICENSE BLOCK *****
4 # Version: MPL 1.1/GPL 2.0/LGPL 2.1
6 # The contents of this file are subject to the Mozilla Public License Version
7 # 1.1 (the "License"); you may not use this file except in compliance with
8 # the License. You may obtain a copy of the License at
9 # http://www.mozilla.org/MPL/
11 # Software distributed under the License is distributed on an "AS IS" basis,
12 # WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
13 # for the specific language governing rights and limitations under the
14 # License.
16 # The Original Code is Mozilla page-loader test, released Aug 5, 2001.
18 # The Initial Developer of the Original Code is
19 # Netscape Communications Corporation.
20 # Portions created by the Initial Developer are Copyright (C) 2001
21 # the Initial Developer. All Rights Reserved.
23 # Contributor(s):
24 # John Morrison <jrgm@netscape.com>, original author
26 # Alternatively, the contents of this file may be used under the terms of
27 # either the GNU General Public License Version 2 or later (the "GPL"), or
28 # the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
29 # in which case the provisions of the GPL or the LGPL are applicable instead
30 # of those above. If you wish to allow use of your version of this file only
31 # under the terms of either the GPL or the LGPL, and not to allow others to
32 # use your version of this file under the terms of the MPL, indicate your
33 # decision by deleting the provisions above and replace them with the notice
34 # and other provisions required by the GPL or the LGPL. If you do not delete
35 # the provisions above, a recipient may use your version of this file under
36 # the terms of any one of the MPL, the GPL or the LGPL.
38 # ***** END LICENSE BLOCK *****
39 use DBI;
40 use CGI::Carp qw(fatalsToBrowser);
41 use CGI::Request;
42 use URLTimingDataSet;
43 use File::Copy ();
44 use strict;
46 use vars qw($dbh $arc $dbroot); # current db, and db/archive
48 use constant STALE_AGE => 5 * 60; # seconds
50 # show a chart of this run; turned off in automated tests, and where
51 # an installation hasn't set up the required modules and libraries
52 use constant SHOW_CHART => 0;
# Create tMetaTable in the archive database if it does not already exist.
# Relies on the globals $arc (archive DBI handle) and $dbroot (db directory).
# DBD::CSV stores each table as a flat file, so existence is checked with -e.
# Returns 1 after creating the table; returns early (undef) if it exists.
sub createArchiveMetaTable {
    my $table = "tMetaTable";
    return if -e "$dbroot/archive/$table"; # don't create it if it exists
    warn "createMetaTable:\t$dbroot/archive/$table";
    mkdir "$dbroot/archive" unless -d "$dbroot/archive";
    my ($sth, $sql);
    # Schema mirrors the live tMetaTable so rows can be copied over verbatim
    # by insertIntoMetaTable().
    $sql = qq{
        CREATE TABLE tMetaTable
        (DATETIME CHAR(14), LASTPING CHAR(14),
         ID CHAR(8), INDEX INTEGER,
         CUR_IDX INTEGER, CUR_CYC INTEGER,
         CUR_CONTENT CHAR(128), STATE INTEGER,
         BLESSED INTEGER, MAXCYC INTEGER,
         MAXIDX INTEGER, REPLACE INTEGER,
         NOCACHE INTEGER, DELAY INTEGER,
         REMOTE_USER CHAR(16), HTTP_USER_AGENT CHAR(128),
         REMOTE_ADDR CHAR(15), USER_EMAIL CHAR(32),
         USER_COMMENT CHAR(256))
    };
    $sth = $arc->prepare($sql);
    $sth->execute();
    $sth->finish();
    warn 'created archive meta table';
    return 1;
}
# Garbage-collect the live metatable (global $dbh, DBD::CSV):
#   1. delete INIT/OPEN runs not pinged within STALE_AGE (abandoned runs),
#   2. move records older than STALE_AGE to the archive database,
#   3. unless SHOW_CHART, also archive the record for the current run $id.
# Row layout assumed by index: [1] = LASTPING timestamp, [2] = run ID.
sub purgeStaleEntries {
    my $id = shift;
    my $metatable = "tMetaTable";
    # first, remove dead stuff
    my $sql = qq{SELECT * FROM $metatable
                 WHERE STATE = "INIT" OR STATE = "OPEN"};
    my $sth = $dbh->prepare($sql);
    $sth->execute();
    my $now = time();
    my $status;
    while (my @data = $sth->fetchrow_array()) {
        my $age = $now - timestamp2Time($data[1]);
        # if OPEN or INIT, and not heard from within STALE_AGE, then it's
        # never coming back here again. Delete the entry (and its per-run
        # data table). Whine in the error_log.
        if ($age > STALE_AGE) {
            warn "deleting stale record+table, id = $data[2], last = $data[1], @data";
            $dbh->do( qq(DELETE FROM $metatable WHERE ID = "$data[2]") );
            $dbh->do("DROP TABLE t" . $data[2]);
        }
        $status .= "$age @data\n";
    }
    $sth->finish();
    # now move any COMPLETE records to archive
    $sql = qq{SELECT * FROM $metatable};
    $sth = $dbh->prepare($sql);
    $sth->execute();
    $now = time();
    while (my @data = $sth->fetchrow_array()) {
        my $age = $now - timestamp2Time($data[1]);
        # This keeps the "live" entries from growing too slow.
        # If older than STALE_AGE, move to archive.
        if ($age > STALE_AGE) {
            warn "moving COMPLETE record+table, id = $data[2], last = $data[1], @data";
            moveRecordToArchive($data[2], \@data);
            $dbh->do( qq(DELETE FROM $metatable WHERE ID = "$data[2]") );
        }
    }
    $sth->finish();
    if (!SHOW_CHART) {
        # Don't move it if showing a chart. (Otherwise, if showing a
        # a chart, I'd have to do a little extra work to make sure I
        # didn't yank the record away from the IMG request)
        $sql = qq{SELECT * FROM $metatable WHERE ID = "$id"};
        $sth = $dbh->prepare($sql);
        $sth->execute();
        while (my @data = $sth->fetchrow_array()) {
            warn "moving COMPLETE record+table, id = $id, @data\n";
            moveRecordToArchive($data[2], \@data);
            $dbh->do( qq(DELETE FROM $metatable WHERE ID = "$data[2]") );
        }
        $sth->finish();
    }
}
# Move one finished run into the archive database: copy its metatable row
# via insertIntoMetaTable(), then relocate its per-run data table (a flat
# CSV file under $dbroot). Dies on missing arguments.
sub moveRecordToArchive {
    my $id = shift || die "no id";
    my $dataref = shift || die "no dataref";
    createArchiveMetaTable(); # if it doesn't exist
    insertIntoMetaTable($dataref);
    # move() returns false on failure; surface it rather than lose the table
    # silently (the metatable row has already been copied at this point).
    File::Copy::move("$dbroot/t$id", "$dbroot/archive/t$id")
        or warn "failed to move $dbroot/t$id to archive: $!";
}
# Insert one metatable row into the archive's tMetaTable on the $arc handle.
# $dataref is an arrayref of 19 column values in schema order (the row as
# fetched from the live tMetaTable); placeholders keep quoting to the driver.
sub insertIntoMetaTable {
    my $dataref = shift || die "no dataref";
    my $table = "tMetaTable";
    my ($sth, $sql);
    $sql = qq{
        INSERT INTO $table
        (DATETIME, LASTPING, ID,
         INDEX, CUR_IDX, CUR_CYC,
         CUR_CONTENT, STATE, BLESSED,
         MAXCYC, MAXIDX, REPLACE,
         NOCACHE, DELAY, REMOTE_USER,
         HTTP_USER_AGENT, REMOTE_ADDR, USER_EMAIL,
         USER_COMMENT)
        VALUES (?,?,?,?,
                ?,?,?,?,
                ?,?,?,?,
                ?,?,?,?,
                ?,?,?)
    };
    $sth = $arc->prepare($sql);
    $sth->execute(@$dataref);
    $sth->finish();
}
# Convert a 14-character timestamp string (YYYYMMDDHHMMSS, local time) to
# epoch seconds. The ($) prototype is kept for call-site compatibility.
sub timestamp2Time ($) {
    my $str = shift;
    use Time::Local ();
    # unpack yields (year, month, day, hour, min, sec); timelocal wants the
    # reverse order (sec, min, hour, mday, mon, year) with a 0-based month.
    my @datetime = reverse unpack 'A4A2A2A2A2A2', $str;
    --$datetime[4]; # month: 0-11
    return Time::Local::timelocal(@datetime);
}
# Flatten a URLTimingDataSet into one line for storage elsewhere:
#   "avgmedian|A|average|B|minimum|C|maximum|D:row;fields|row;fields|..."
# Rows of $rs->as_string are separated by '|', fields within a row by ';'.
sub serializeDataSet {
    my $rs = shift;
    my $data = "avgmedian|" . $rs->{avgmedian};
    $data .= "|average|" . $rs->{average};
    $data .= "|minimum|" . $rs->{minimum};
    $data .= "|maximum|" . $rs->{maximum};
    # Work on a lexical copy instead of $_ so the caller's $_ (used by the
    # top-level script code) is not clobbered as a side effect.
    my $table = $rs->as_string;
    $table =~ s/^\s+//gs;      # strip leading whitespace
    $table =~ s/\s+\n$//gs;    # strip trailing whitespace before final newline
    $table =~ s/\s*\n/\|/gs;   # fold newlines
    $table =~ s/\|\s+/\|/gs;   # drop indentation after each fold
    $table =~ s/\s+/;/gs;      # collapse remaining whitespace to field seps
    return $data . ":" . $table;
}
# ---- handle the request -------------------------------------------------
# Emit an HTML page whose inline script dumps this run's statistics via
# window.dump() so tinderbox can scrape them from the client console; the
# page then closes itself. $request/$id/$rs/$data are file-level lexicals
# also used by the Safari and purge sections below.
my $request = CGI::Request->new;
my $id = $request->param('id'); #XXX need to check for valid parameter id
my $rs = URLTimingDataSet->new($id);

print "Content-type: text/html\n\n";

# This sucks: we'll let the test time out to avoid crash-on-shutdown bugs
print "<html><body onload='window.close();'>";

# dump some stats for tinderbox to snarf. Each emitted line ends in the
# literal "\n\" so the generated JS string literal spans multiple lines.
print "<script>\n";
print "if (window.dump) dump('";
print "Starting Page Load Test\\n\\\n";
print "Test id: $id\\n\\\n";
print "Avg. Median : ", $rs->{avgmedian}, " msec\\n\\\n";
print "Average : ", $rs->{average}, " msec\\n\\\n";
print "Minimum : ", $rs->{minimum}, " msec\\n\\\n";
print "Maximum : ", $rs->{maximum}, " msec\\n\\\n";
print "IDX PATH AVG MED MAX MIN TIMES ...\\n\\\n";
if ($request->param('sort')) {
    $_ = $rs->as_string_sorted();
} else {
    $_ = $rs->as_string();
}

# Terminate raw newlines with '\n\' so we don't have an unterminated string literal.
s/\n/\\n\\\n/g;
print $_;
print "(tinderbox dropping follows)\\n\\\n";
print "_x_x_mozilla_page_load," , $rs->{avgmedian}, ",", $rs->{maximum}, ",", $rs->{minimum}, "\\n\\\n";

# package up this data for storage elsewhere
my $data = serializeDataSet($rs);
print "_x_x_mozilla_page_load_details,", $data, "\\n\\\n";

# average median
#print "TinderboxPrint:<a title=\"Avg. of the median per url pageload time.\" href=\"http://tegu.mozilla.org/graph/query.cgi?tbox=spider&testname=pageload&autoscale=1&days=7&avg=1\">Tp:", $rs->{avgmedian}, "ms</a>", "\\n\\\n";
print "');";
print "</script></body></html>\n";
# If this is SurfingSafari, then catch a wave and you're sitting on top of the world!!
# (and also blat this out to tegu, cause we got no 'dump' statement.)
if ($request->cgi->var("HTTP_USER_AGENT") =~ /Safari/) {
    # Map the test box's IP address to its tinderbox machine name.
    my %machineMap = (
        "10.169.105.26" => "boxset",
        "10.169.105.21" => "pawn"
    );
    my $ip = $request->cgi->var('REMOTE_ADDR');
    my $machine = $machineMap{$ip};
    # String eval keeps LWP optional: if the modules are missing, the failure
    # lands in $@ rather than killing the CGI at compile time. $machine, $rs
    # and $data are lexicals in scope, interpolated when the eval runs.
    my $res = eval q{
        use LWP::UserAgent;
        use HTTP::Request::Common qw(POST);
        my $ua = LWP::UserAgent->new;
        $ua->timeout(10); # seconds
        my $req = POST('http://tegu.mozilla.org/graph/collect.cgi',
                       [testname => 'pageload',
                        tbox => "$machine" . "-aux",
                        value => $rs->{avgmedian},
                        data => $data]);
        my $res = $ua->request($req);
        return $res;
    };
    if ($@) {
        warn "Failed to submit startup results: $@";
    } else {
        warn "Startup results submitted to server: \n",
             $res->status_line, "\n", $res->content, "\n";
    }
}
if ($request->param('purge')) {
    # now move any old stuff into archive and clean stale entries
    # just going with the simple approach of "whoever sees old entries
    # first, cleans em up, whether they 'own' them or not". Hopefully,
    # the default locking will be sufficient to prevent a race.
    close(STDOUT); # flush the page to the client before the slow db work
    sleep(1);
    $dbroot = "db";
    # DBI->connect takes ($dsn, $user, $pass, \%attr); the attribute hash
    # must go in the fourth slot or RaiseError/AutoCommit never take effect.
    $dbh = DBI->connect("DBI:CSV:f_dir=./$dbroot", undef, undef,
                        {RaiseError => 1, AutoCommit => 1})
        || die "Cannot connect: " . $DBI::errstr;
    $arc = DBI->connect("DBI:CSV:f_dir=./$dbroot/archive", undef, undef,
                        {RaiseError => 1, AutoCommit => 1})
        || die "Cannot connect: " . $DBI::errstr;
    purgeStaleEntries($id);
    $dbh->disconnect();
    $arc->disconnect();
}
exit 0;