Merge "Typo fix"
[mediawiki.git] / includes / search / SearchUpdate.php
<?php
/**
 * Search index updater
 *
 * See deferred.txt
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 * http://www.gnu.org/copyleft/gpl.html
 *
 * @file
 * @ingroup Search
 */
/**
 * Database independent search index updater
 *
 * @ingroup Search
 */
class SearchUpdate implements DeferrableUpdate {
	/**
	 * Page id being updated
	 * @var int
	 */
	private $id = 0;

	/**
	 * Title we're updating
	 * @var Title
	 */
	private $title;

	/**
	 * Content of the page (not text)
	 * @var Content|false
	 */
	private $content;
	/**
	 * Constructor
	 *
	 * @param int $id Page id to update
	 * @param Title|string $title Title of page to update
	 * @param Content|string|false $c Content of the page to update.
	 *  If a Content object, text will be gotten from it. String is for back-compat.
	 *  Passing false tells the backend to just update the title, not the content
	 */
	public function __construct( $id, $title, $c = false ) {
		if ( is_string( $title ) ) {
			$nt = Title::newFromText( $title );
		} else {
			$nt = $title;
		}

		if ( $nt ) {
			$this->id = $id;
			// is_string() check is back-compat for ApprovedRevs
			if ( is_string( $c ) ) {
				$this->content = new TextContent( $c );
			} else {
				$this->content = $c ?: false;
			}
			$this->title = $nt;
		} else {
			wfDebug( "SearchUpdate object created with invalid title '$title'\n" );
		}
	}
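	// Illustrative usage only (the $page variable here is a hypothetical
	// WikiPage): as a DeferrableUpdate, a SearchUpdate is normally queued via
	// DeferredUpdates rather than run directly.
	//
	//   $update = new SearchUpdate( $page->getId(), $page->getTitle(), $content );
	//   DeferredUpdates::addUpdate( $update );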
	/**
	 * Perform actual update for the entry
	 */
	public function doUpdate() {
		global $wgDisableSearchUpdate;

		if ( $wgDisableSearchUpdate || !$this->id ) {
			return;
		}

		wfProfileIn( __METHOD__ );
		$search = SearchEngine::create();
		$normalTitle = $search->normalizeText(
			Title::indexTitle( $this->title->getNamespace(), $this->title->getText() ) );

		if ( WikiPage::newFromId( $this->id ) === null ) {
			$search->delete( $this->id, $normalTitle );
			wfProfileOut( __METHOD__ );
			return;
		} elseif ( $this->content === false ) {
			$search->updateTitle( $this->id, $normalTitle );
			wfProfileOut( __METHOD__ );
			return;
		}
		$text = $search->getTextFromContent( $this->title, $this->content );
		if ( wfRunHooks( 'SearchUpdate', array( $this->id, $this->title, &$text, $this->content ) ) ) {
			$text = self::updateText( $text );
		}
		# Perform the actual update
		$search->update( $this->id, $normalTitle, $search->normalizeText( $text ) );

		wfProfileOut( __METHOD__ );
	}
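	// Sketch of a 'SearchUpdate' hook handler as invoked above (the handler
	// body is made up); returning true lets the default updateText() cleanup
	// still run afterwards:
	//
	//   $wgHooks['SearchUpdate'][] = function ( $id, $title, &$text, $content ) {
	//       $text .= ' extra indexed keywords';
	//       return true;
	//   };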
	/**
	 * Clean text for indexing. Only really suitable for indexing in databases.
	 * If you're using a real search engine, you'll probably want to override
	 * this behavior and do something nicer with the original wikitext.
	 */
	public static function updateText( $text ) {
		global $wgContLang;

		# Language-specific strip/conversion
		$text = $wgContLang->normalizeForSearch( $text );
		$lc = SearchEngine::legalSearchChars() . '&#;';

		wfProfileIn( __METHOD__ . '-regexps' );
		$text = preg_replace( "/<\\/?\\s*[A-Za-z][^>]*?>/",
			' ', $wgContLang->lc( " " . $text . " " ) ); # Strip HTML markup
		$text = preg_replace( "/(^|\\n)==\\s*([^\\n]+)\\s*==(\\s)/sD",
			"\\1\\2 \\2 \\2\\3", $text ); # Emphasize headings
		# Strip external URLs
		$uc = "A-Za-z0-9_\\/:.,~%\\-+&;#?!=()@\\x80-\\xFF";
		$protos = "http|https|ftp|mailto|news|gopher";
		$pat = "/(^|[^\\[])({$protos}):[{$uc}]+([^{$uc}]|$)/";
		$text = preg_replace( $pat, "\\1 \\3", $text );

		$p1 = "/([^\\[])\\[({$protos}):[{$uc}]+]/";
		$p2 = "/([^\\[])\\[({$protos}):[{$uc}]+\\s+([^\\]]+)]/";
		$text = preg_replace( $p1, "\\1 ", $text );
		$text = preg_replace( $p2, "\\1 \\3 ", $text );
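		# For example, a bare "http://example.com/foo" is dropped entirely,
		# "[http://example.com/foo]" is dropped too, and
		# "[http://example.com/foo link text]" keeps only "link text", so URLs
		# themselves are not indexed.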
		# Internal image links
		$pat2 = "/\\[\\[image:([{$uc}]+)\\.(gif|png|jpg|jpeg)([^{$uc}])/i";
		$text = preg_replace( $pat2, " \\1 \\3", $text );

		$text = preg_replace( "/([^{$lc}])([{$lc}]+)]]([a-z]+)/",
			"\\1\\2 \\2\\3", $text ); # Handle [[game]]s
		# Strip all remaining non-search characters
		$text = preg_replace( "/[^{$lc}]+/", " ", $text );

		# Handle 's, s'
		#
		# $text = preg_replace( "/([{$lc}]+)'s /", "\\1 \\1's ", $text );
		# $text = preg_replace( "/([{$lc}]+)s' /", "\\1s ", $text );
		#
		# These tail-anchored regexps are insanely slow. The worst case comes
		# when Japanese or Chinese text (ie, no word spacing) is written on
		# a wiki configured for Western UTF-8 mode. The Unicode characters are
		# expanded to hex codes and the "words" are very long paragraph-length
		# monstrosities. On a large page the above regexps may take over 20
		# seconds *each* on a 1GHz-level processor.
		#
		# Following are reversed versions which are consistently fast
		# (about 3 milliseconds on 1GHz-level processor).
		$text = strrev( preg_replace( "/ s'([{$lc}]+)/", " s'\\1 \\1", strrev( $text ) ) );
		$text = strrev( preg_replace( "/ 's([{$lc}]+)/", " s\\1", strrev( $text ) ) );
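		# The strrev() trick turns the tail-anchored match into a cheap
		# head-anchored one: " wiki's " reversed is " s'ikiw ", the first
		# pattern rewrites that to " s'ikiw ikiw ", and reversing back yields
		# " wiki wiki's ", the same result as the commented-out version above.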
		# Strip wiki '' and '''
		$text = preg_replace( "/''[']*/", " ", $text );
		wfProfileOut( __METHOD__ . '-regexps' );

		return $text;
	}
}
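// Rough end-to-end illustration of updateText() (not part of the original
// file), assuming an English content language; the output shown is
// approximate:
//
//   SearchUpdate::updateText( "Intro\n== History ==\nSee [http://example.com the site]" );
//   // => " intro history history history see the site "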