author    Pierre Schmitz <pierre@archlinux.de>    2006-10-11 18:12:39 +0000
committer Pierre Schmitz <pierre@archlinux.de>    2006-10-11 18:12:39 +0000
commit    183851b06bd6c52f3cae5375f433da720d410447 (patch)
tree      a477257decbf3360127f6739c2f9d0ec57a03d39 /maintenance
MediaWiki 1.7.1 restored
Diffstat (limited to 'maintenance')
-rw-r--r--  maintenance/.htaccess | 1
-rw-r--r--  maintenance/Doxyfile | 279
-rw-r--r--  maintenance/FiveUpgrade.inc | 1214
-rw-r--r--  maintenance/InitialiseMessages.inc | 240
-rw-r--r--  maintenance/Makefile | 20
-rw-r--r--  maintenance/README | 85
-rw-r--r--  maintenance/addwiki.php | 210
-rw-r--r--  maintenance/alltrans.php | 11
-rw-r--r--  maintenance/apache-ampersand.diff | 53
-rw-r--r--  maintenance/archives/.htaccess | 1
-rw-r--r--  maintenance/archives/patch-archive-rev_id.sql | 6
-rw-r--r--  maintenance/archives/patch-archive-text_id.sql | 14
-rw-r--r--  maintenance/archives/patch-bot.sql | 11
-rw-r--r--  maintenance/archives/patch-cache.sql | 41
-rw-r--r--  maintenance/archives/patch-categorylinks.sql | 39
-rw-r--r--  maintenance/archives/patch-drop-user_newtalk.sql | 3
-rw-r--r--  maintenance/archives/patch-drop_img_type.sql | 3
-rw-r--r--  maintenance/archives/patch-email-authentication.sql | 3
-rw-r--r--  maintenance/archives/patch-email-notification.sql | 11
-rw-r--r--  maintenance/archives/patch-externallinks.sql | 13
-rw-r--r--  maintenance/archives/patch-filearchive.sql | 51
-rw-r--r--  maintenance/archives/patch-hitcounter.sql | 9
-rw-r--r--  maintenance/archives/patch-image_name_primary.sql | 6
-rw-r--r--  maintenance/archives/patch-image_name_unique.sql | 6
-rw-r--r--  maintenance/archives/patch-img_exif.sql | 3
-rw-r--r--  maintenance/archives/patch-img_media_type.sql | 17
-rw-r--r--  maintenance/archives/patch-img_metadata.sql | 6
-rw-r--r--  maintenance/archives/patch-img_width.sql | 18
-rw-r--r--  maintenance/archives/patch-indexes.sql | 24
-rw-r--r--  maintenance/archives/patch-interwiki-trans.sql | 2
-rw-r--r--  maintenance/archives/patch-interwiki.sql | 20
-rw-r--r--  maintenance/archives/patch-inverse_timestamp.sql | 15
-rw-r--r--  maintenance/archives/patch-ipb_expiry.sql | 8
-rw-r--r--  maintenance/archives/patch-ipb_range_start.sql | 25
-rw-r--r--  maintenance/archives/patch-ipblocks.sql | 6
-rw-r--r--  maintenance/archives/patch-job.sql | 20
-rw-r--r--  maintenance/archives/patch-langlinks.sql | 14
-rw-r--r--  maintenance/archives/patch-linkscc-1.3.sql | 6
-rw-r--r--  maintenance/archives/patch-linkscc.sql | 12
-rw-r--r--  maintenance/archives/patch-linktables.sql | 70
-rw-r--r--  maintenance/archives/patch-list.txt | 182
-rw-r--r--  maintenance/archives/patch-log_params.sql | 1
-rw-r--r--  maintenance/archives/patch-logging-times-index.sql | 9
-rw-r--r--  maintenance/archives/patch-logging-title.sql | 6
-rw-r--r--  maintenance/archives/patch-logging.sql | 37
-rw-r--r--  maintenance/archives/patch-math.sql | 28
-rw-r--r--  maintenance/archives/patch-mimesearch-indexes.sql | 22
-rw-r--r--  maintenance/archives/patch-objectcache.sql | 9
-rw-r--r--  maintenance/archives/patch-oldestindex.sql | 5
-rw-r--r--  maintenance/archives/patch-page_len.sql | 16
-rw-r--r--  maintenance/archives/patch-pagelinks.sql | 56
-rw-r--r--  maintenance/archives/patch-parsercache.sql | 15
-rw-r--r--  maintenance/archives/patch-profiling.sql | 10
-rw-r--r--  maintenance/archives/patch-querycache.sql | 16
-rw-r--r--  maintenance/archives/patch-querycacheinfo.sql | 12
-rw-r--r--  maintenance/archives/patch-random-dateindex.sql | 54
-rw-r--r--  maintenance/archives/patch-rc-newindex.sql | 9
-rw-r--r--  maintenance/archives/patch-rc-patrol.sql | 9
-rw-r--r--  maintenance/archives/patch-rc_id.sql | 7
-rw-r--r--  maintenance/archives/patch-rc_ip.sql | 7
-rw-r--r--  maintenance/archives/patch-rc_type.sql | 9
-rw-r--r--  maintenance/archives/patch-rename-group.sql | 10
-rw-r--r--  maintenance/archives/patch-rename-user_groups-and_rights.sql | 9
-rw-r--r--  maintenance/archives/patch-restructure.sql | 147
-rw-r--r--  maintenance/archives/patch-rev_deleted.sql | 11
-rw-r--r--  maintenance/archives/patch-rev_text_id.sql | 17
-rw-r--r--  maintenance/archives/patch-searchindex.sql | 40
-rw-r--r--  maintenance/archives/patch-ss_images.sql | 5
-rw-r--r--  maintenance/archives/patch-ss_total_articles.sql | 6
-rw-r--r--  maintenance/archives/patch-templatelinks.sql | 19
-rw-r--r--  maintenance/archives/patch-trackbacks.sql | 10
-rw-r--r--  maintenance/archives/patch-transcache.sql | 7
-rw-r--r--  maintenance/archives/patch-user-realname.sql | 5
-rw-r--r--  maintenance/archives/patch-user_email_token.sql | 12
-rw-r--r--  maintenance/archives/patch-user_groups.sql | 25
-rw-r--r--  maintenance/archives/patch-user_nameindex.sql | 13
-rw-r--r--  maintenance/archives/patch-user_registration.sql | 9
-rw-r--r--  maintenance/archives/patch-user_rights.sql | 21
-rw-r--r--  maintenance/archives/patch-user_token.sql | 15
-rw-r--r--  maintenance/archives/patch-userindex.sql | 1
-rw-r--r--  maintenance/archives/patch-userlevels-defaultgroups.sql | 30
-rw-r--r--  maintenance/archives/patch-userlevels-rights.sql | 5
-rw-r--r--  maintenance/archives/patch-userlevels.sql | 22
-rw-r--r--  maintenance/archives/patch-usernewtalk.sql | 20
-rw-r--r--  maintenance/archives/patch-usernewtalk2.sql | 6
-rw-r--r--  maintenance/archives/patch-val_ip.sql | 4
-rw-r--r--  maintenance/archives/patch-validate.sql | 13
-rw-r--r--  maintenance/archives/patch-watchlist-null.sql | 9
-rw-r--r--  maintenance/archives/patch-watchlist.sql | 30
-rw-r--r--  maintenance/archives/rebuildRecentchanges.inc | 122
-rw-r--r--  maintenance/archives/upgradeWatchlist.php | 67
-rw-r--r--  maintenance/attachLatest.php | 73
-rw-r--r--  maintenance/attribute.php | 105
-rw-r--r--  maintenance/backup.inc | 296
-rw-r--r--  maintenance/backupPrefetch.inc | 203
-rw-r--r--  maintenance/benchmarkPurge.php | 65
-rw-r--r--  maintenance/build-intl-wiki.sql | 31
-rw-r--r--  maintenance/changePassword.php | 53
-rw-r--r--  maintenance/changeuser.sql | 12
-rw-r--r--  maintenance/checkUsernames.php | 37
-rw-r--r--  maintenance/checktrans.php | 30
-rw-r--r--  maintenance/cleanupCaps.php | 158
-rw-r--r--  maintenance/cleanupDupes.inc | 131
-rw-r--r--  maintenance/cleanupDupes.php | 37
-rw-r--r--  maintenance/cleanupSpam.php | 112
-rw-r--r--  maintenance/cleanupTitles.php | 210
-rw-r--r--  maintenance/cleanupWatchlist.php | 141
-rw-r--r--  maintenance/clear_interwiki_cache.php | 26
-rw-r--r--  maintenance/clear_stats.php | 31
-rw-r--r--  maintenance/commandLine.inc | 232
-rw-r--r--  maintenance/convertLinks.inc | 220
-rw-r--r--  maintenance/convertLinks.php | 16
-rw-r--r--  maintenance/counter.php | 5
-rw-r--r--  maintenance/createAndPromote.php | 48
-rw-r--r--  maintenance/database.sql | 7
-rw-r--r--  maintenance/delete-idle-wiki-users.pl | 138
-rw-r--r--  maintenance/deleteBatch.php | 85
-rw-r--r--  maintenance/deleteImageMemcached.php | 60
-rw-r--r--  maintenance/deleteOldRevisions.inc | 60
-rw-r--r--  maintenance/deleteOldRevisions.php | 30
-rw-r--r--  maintenance/deleteOrphanedRevisions.inc.php | 33
-rw-r--r--  maintenance/deleteOrphanedRevisions.php | 55
-rw-r--r--  maintenance/deleteRevision.php | 40
-rw-r--r--  maintenance/diffLanguage.php | 159
-rw-r--r--  maintenance/dtrace/counts.d | 23
-rw-r--r--  maintenance/dtrace/tree.d | 26
-rw-r--r--  maintenance/dumpBackup.php | 99
-rw-r--r--  maintenance/dumpHTML.inc | 650
-rw-r--r--  maintenance/dumpHTML.php | 131
-rw-r--r--  maintenance/dumpInterwiki.inc | 219
-rw-r--r--  maintenance/dumpInterwiki.php | 25
-rw-r--r--  maintenance/dumpLinks.php | 63
-rw-r--r--  maintenance/dumpMessages.php | 19
-rw-r--r--  maintenance/dumpReplayLog.php | 118
-rw-r--r--  maintenance/dumpTextPass.php | 347
-rw-r--r--  maintenance/duplicatetrans.php | 29
-rw-r--r--  maintenance/entities2literals.pl | 276
-rw-r--r--  maintenance/eval.php | 63
-rw-r--r--  maintenance/fetchInterwiki.pl | 102
-rw-r--r--  maintenance/findhooks.php | 93
-rw-r--r--  maintenance/fixSlaveDesync.php | 100
-rw-r--r--  maintenance/fixTimestamps.php | 104
-rw-r--r--  maintenance/fixUserRegistration.php | 31
-rw-r--r--  maintenance/generateSitemap.php | 463
-rw-r--r--  maintenance/importDump.php | 141
-rw-r--r--  maintenance/importImages.inc.php | 67
-rw-r--r--  maintenance/importImages.php | 101
-rw-r--r--  maintenance/importLogs.inc | 144
-rw-r--r--  maintenance/importLogs.php | 27
-rw-r--r--  maintenance/importPhase2.php | 370
-rw-r--r--  maintenance/importTextFile.inc | 75
-rw-r--r--  maintenance/importTextFile.php | 111
-rw-r--r--  maintenance/importUseModWiki.php | 365
-rw-r--r--  maintenance/initStats.php | 78
-rw-r--r--  maintenance/interwiki.sql | 179
-rw-r--r--  maintenance/lang2po.php | 154
-rw-r--r--  maintenance/langmemusage.php | 30
-rw-r--r--  maintenance/languages.inc | 48
-rw-r--r--  maintenance/mcc.php | 173
-rw-r--r--  maintenance/mctest.php | 59
-rw-r--r--  maintenance/moveBatch.php | 85
-rw-r--r--  maintenance/mwdocgen.php | 205
-rw-r--r--  maintenance/mwdoxygen.cfg | 1136
-rw-r--r--  maintenance/mysql5/tables.sql | 1009
-rw-r--r--  maintenance/namespace2sql.php | 14
-rw-r--r--  maintenance/namespaceDupes.php | 194
-rw-r--r--  maintenance/nukePage.inc | 80
-rw-r--r--  maintenance/nukePage.php | 30
-rw-r--r--  maintenance/oracle/archives/patch-trackbacks.sql | 10
-rw-r--r--  maintenance/oracle/archives/patch-transcache.sql | 5
-rw-r--r--  maintenance/oracle/interwiki.sql | 178
-rw-r--r--  maintenance/oracle/tables.sql | 333
-rw-r--r--  maintenance/orphans.php | 207
-rw-r--r--  maintenance/ourusers.php | 121
-rw-r--r--  maintenance/parserTests.inc | 791
-rw-r--r--  maintenance/parserTests.php | 64
-rw-r--r--  maintenance/parserTests.txt | 5475
-rw-r--r--  maintenance/parserTestsParserHook.php | 34
-rw-r--r--  maintenance/parserTestsParserTime.php | 26
-rw-r--r--  maintenance/parserTestsStaticParserHook.php | 44
-rw-r--r--  maintenance/postgres/tables.sql | 420
-rw-r--r--  maintenance/purgeOldText.inc | 63
-rw-r--r--  maintenance/purgeOldText.php | 30
-rw-r--r--  maintenance/reassignEdits.inc.php | 144
-rw-r--r--  maintenance/reassignEdits.php | 57
-rw-r--r--  maintenance/rebuildImages.php | 275
-rw-r--r--  maintenance/rebuildInterwiki.inc | 260
-rw-r--r--  maintenance/rebuildInterwiki.php | 31
-rw-r--r--  maintenance/rebuildMessages.php | 66
-rw-r--r--  maintenance/rebuildall.php | 39
-rw-r--r--  maintenance/rebuildrecentchanges.inc | 97
-rw-r--r--  maintenance/rebuildrecentchanges.php | 25
-rw-r--r--  maintenance/rebuildtextindex.inc | 68
-rw-r--r--  maintenance/rebuildtextindex.php | 25
-rw-r--r--  maintenance/recount.sql | 8
-rw-r--r--  maintenance/redundanttrans.php | 28
-rw-r--r--  maintenance/refreshImageCount.php | 25
-rw-r--r--  maintenance/refreshLinks.inc | 131
-rw-r--r--  maintenance/refreshLinks.php | 32
-rw-r--r--  maintenance/removeUnusedAccounts.inc | 47
-rw-r--r--  maintenance/removeUnusedAccounts.php | 58
-rw-r--r--  maintenance/renderDump.php | 103
-rw-r--r--  maintenance/runJobs.php | 20
-rw-r--r--  maintenance/showJobs.php | 19
-rw-r--r--  maintenance/splitLanguageFiles.inc | 1168
-rw-r--r--  maintenance/splitLanguageFiles.php | 13
-rw-r--r--  maintenance/stats.php | 45
-rw-r--r--  maintenance/storage/blobs.sql | 8
-rw-r--r--  maintenance/storage/checkStorage.php | 468
-rw-r--r--  maintenance/storage/compressOld.inc | 300
-rw-r--r--  maintenance/storage/compressOld.php | 82
-rw-r--r--  maintenance/storage/dumpRev.php | 14
-rwxr-xr-x  maintenance/storage/make-blobs | 11
-rw-r--r--  maintenance/storage/moveToExternal.php | 97
-rw-r--r--  maintenance/storage/resolveStubs.php | 100
-rw-r--r--  maintenance/tables.sql | 998
-rw-r--r--  maintenance/transstat.php | 203
-rw-r--r--  maintenance/trivialCmdLine.php | 21
-rw-r--r--  maintenance/update.php | 71
-rw-r--r--  maintenance/updateArticleCount.inc.php | 68
-rw-r--r--  maintenance/updateArticleCount.php | 42
-rw-r--r--  maintenance/updateSearchIndex.inc | 115
-rw-r--r--  maintenance/updateSearchIndex.php | 57
-rw-r--r--  maintenance/updateSpecialPages.php | 96
-rw-r--r--  maintenance/updaters.inc | 835
-rw-r--r--  maintenance/upgrade1_5.php | 24
-rw-r--r--  maintenance/userDupes.inc | 328
-rw-r--r--  maintenance/userDupes.php | 41
-rw-r--r--  maintenance/users.sql | 12
-rw-r--r--  maintenance/wiki-mangleme.php | 553
-rw-r--r--  maintenance/wikipedia-interwiki.sql | 220
-rw-r--r--  maintenance/wiktionary-interwiki.sql | 160
232 files changed, 30129 insertions, 0 deletions
diff --git a/maintenance/.htaccess b/maintenance/.htaccess
new file mode 100644
index 00000000..3a428827
--- /dev/null
+++ b/maintenance/.htaccess
@@ -0,0 +1 @@
+Deny from all
diff --git a/maintenance/Doxyfile b/maintenance/Doxyfile
new file mode 100644
index 00000000..06f2c096
--- /dev/null
+++ b/maintenance/Doxyfile
@@ -0,0 +1,279 @@
+# Doxyfile 1.4.6
+
+#
+# Some placeholders have been added for MediaWiki usage:
+# {{OUTPUT_DIRECTORY}}
+# {{STRIP_FROM_PATH}}
+# {{INPUT}}
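+#
+# (They are meant to be filled in by whatever generates the final config --
+#  e.g. the mwdocgen.php wrapper script -- before doxygen is run.)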
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+PROJECT_NAME = MediaWiki
+PROJECT_NUMBER = trunk
+OUTPUT_DIRECTORY = {{OUTPUT_DIRECTORY}}
+CREATE_SUBDIRS = NO
+OUTPUT_LANGUAGE = English
+USE_WINDOWS_ENCODING = NO
+BRIEF_MEMBER_DESC = YES
+REPEAT_BRIEF = YES
+ABBREVIATE_BRIEF = "The $name class" \
+ "The $name widget" \
+ "The $name file" \
+ is \
+ provides \
+ specifies \
+ contains \
+ represents \
+ a \
+ an \
+ the
+ALWAYS_DETAILED_SEC = NO
+INLINE_INHERITED_MEMB = NO
+FULL_PATH_NAMES = YES
+STRIP_FROM_PATH = {{STRIP_FROM_PATH}}
+STRIP_FROM_INC_PATH =
+SHORT_NAMES = NO
+JAVADOC_AUTOBRIEF = NO
+MULTILINE_CPP_IS_BRIEF = NO
+DETAILS_AT_TOP = NO
+INHERIT_DOCS = YES
+SEPARATE_MEMBER_PAGES = NO
+TAB_SIZE = 8
+ALIASES =
+OPTIMIZE_OUTPUT_FOR_C = NO
+OPTIMIZE_OUTPUT_JAVA = NO
+BUILTIN_STL_SUPPORT = NO
+DISTRIBUTE_GROUP_DOC = NO
+SUBGROUPING = YES
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+EXTRACT_ALL = YES
+EXTRACT_PRIVATE = YES
+EXTRACT_STATIC = YES
+EXTRACT_LOCAL_CLASSES = YES
+EXTRACT_LOCAL_METHODS = NO
+HIDE_UNDOC_MEMBERS = NO
+HIDE_UNDOC_CLASSES = NO
+HIDE_FRIEND_COMPOUNDS = NO
+HIDE_IN_BODY_DOCS = NO
+INTERNAL_DOCS = NO
+CASE_SENSE_NAMES = YES
+HIDE_SCOPE_NAMES = NO
+SHOW_INCLUDE_FILES = YES
+INLINE_INFO = YES
+SORT_MEMBER_DOCS = YES
+SORT_BRIEF_DOCS = NO
+SORT_BY_SCOPE_NAME = NO
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+ENABLED_SECTIONS =
+MAX_INITIALIZER_LINES = 30
+SHOW_USED_FILES = YES
+SHOW_DIRECTORIES = NO
+FILE_VERSION_FILTER =
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+QUIET = NO
+WARNINGS = YES
+WARN_IF_UNDOCUMENTED = YES
+WARN_IF_DOC_ERROR = YES
+WARN_NO_PARAMDOC = NO
+WARN_FORMAT = "$file:$line: $text"
+WARN_LOGFILE =
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+INPUT = {{INPUT}}
+FILE_PATTERNS = *.c \
+ *.cc \
+ *.cxx \
+ *.cpp \
+ *.c++ \
+ *.d \
+ *.java \
+ *.ii \
+ *.ixx \
+ *.ipp \
+ *.i++ \
+ *.inl \
+ *.h \
+ *.hh \
+ *.hxx \
+ *.hpp \
+ *.h++ \
+ *.idl \
+ *.odl \
+ *.cs \
+ *.php \
+ *.php3 \
+ *.inc \
+ *.m \
+ *.mm \
+ *.dox \
+ *.py \
+ *.C \
+ *.CC \
+ *.C++ \
+ *.II \
+ *.I++ \
+ *.H \
+ *.HH \
+ *.H++ \
+ *.CS \
+ *.PHP \
+ *.PHP3 \
+ *.M \
+ *.MM \
+ *.PY
+RECURSIVE = YES
+EXCLUDE =
+EXCLUDE_SYMLINKS = NO
+EXCLUDE_PATTERNS =
+EXAMPLE_PATH =
+EXAMPLE_PATTERNS = *
+EXAMPLE_RECURSIVE = NO
+IMAGE_PATH =
+INPUT_FILTER =
+FILTER_PATTERNS =
+FILTER_SOURCE_FILES = NO
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+SOURCE_BROWSER = YES
+INLINE_SOURCES = NO
+STRIP_CODE_COMMENTS = YES
+REFERENCED_BY_RELATION = YES
+REFERENCES_RELATION = YES
+USE_HTAGS = NO
+VERBATIM_HEADERS = YES
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+ALPHABETICAL_INDEX = NO
+COLS_IN_ALPHA_INDEX = 5
+IGNORE_PREFIX =
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+GENERATE_HTML = YES
+HTML_OUTPUT = html
+HTML_FILE_EXTENSION = .html
+HTML_HEADER =
+HTML_FOOTER =
+HTML_STYLESHEET =
+HTML_ALIGN_MEMBERS = YES
+GENERATE_HTMLHELP = NO
+CHM_FILE =
+HHC_LOCATION =
+GENERATE_CHI = NO
+BINARY_TOC = NO
+TOC_EXPAND = NO
+DISABLE_INDEX = NO
+ENUM_VALUES_PER_LINE = 4
+GENERATE_TREEVIEW = YES
+TREEVIEW_WIDTH = 250
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+GENERATE_LATEX = NO
+LATEX_OUTPUT = latex
+LATEX_CMD_NAME = latex
+MAKEINDEX_CMD_NAME = makeindex
+COMPACT_LATEX = NO
+PAPER_TYPE = a4wide
+EXTRA_PACKAGES =
+LATEX_HEADER =
+PDF_HYPERLINKS = NO
+USE_PDFLATEX = NO
+LATEX_BATCHMODE = NO
+LATEX_HIDE_INDICES = NO
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+GENERATE_RTF = NO
+RTF_OUTPUT = rtf
+COMPACT_RTF = NO
+RTF_HYPERLINKS = NO
+RTF_STYLESHEET_FILE =
+RTF_EXTENSIONS_FILE =
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+GENERATE_MAN = NO
+MAN_OUTPUT = man
+MAN_EXTENSION = .3
+MAN_LINKS = NO
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+GENERATE_XML = NO
+XML_OUTPUT = xml
+XML_SCHEMA =
+XML_DTD =
+XML_PROGRAMLISTING = YES
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+GENERATE_AUTOGEN_DEF = NO
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+GENERATE_PERLMOD = NO
+PERLMOD_LATEX = NO
+PERLMOD_PRETTY = YES
+PERLMOD_MAKEVAR_PREFIX =
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+ENABLE_PREPROCESSING = YES
+MACRO_EXPANSION = NO
+EXPAND_ONLY_PREDEF = NO
+SEARCH_INCLUDES = YES
+INCLUDE_PATH =
+INCLUDE_FILE_PATTERNS =
+PREDEFINED =
+EXPAND_AS_DEFINED =
+SKIP_FUNCTION_MACROS = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+TAGFILES =
+GENERATE_TAGFILE =
+ALLEXTERNALS = NO
+EXTERNAL_GROUPS = YES
+PERL_PATH = /usr/bin/perl
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+CLASS_DIAGRAMS = NO
+HIDE_UNDOC_RELATIONS = YES
+HAVE_DOT = NO
+CLASS_GRAPH = YES
+COLLABORATION_GRAPH = YES
+GROUP_GRAPHS = YES
+UML_LOOK = NO
+TEMPLATE_RELATIONS = NO
+INCLUDE_GRAPH = YES
+INCLUDED_BY_GRAPH = YES
+CALL_GRAPH = YES
+GRAPHICAL_HIERARCHY = YES
+DIRECTORY_GRAPH = YES
+DOT_IMAGE_FORMAT = png
+DOT_PATH =
+DOTFILE_DIRS =
+MAX_DOT_GRAPH_WIDTH = 1024
+MAX_DOT_GRAPH_HEIGHT = 1024
+MAX_DOT_GRAPH_DEPTH = 1000
+DOT_TRANSPARENT = NO
+DOT_MULTI_TARGETS = NO
+GENERATE_LEGEND = YES
+DOT_CLEANUP = YES
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+SEARCHENGINE = NO
diff --git a/maintenance/FiveUpgrade.inc b/maintenance/FiveUpgrade.inc
new file mode 100644
index 00000000..7caf6810
--- /dev/null
+++ b/maintenance/FiveUpgrade.inc
@@ -0,0 +1,1214 @@
+<?php
+
+require_once( 'cleanupDupes.inc' );
+require_once( 'userDupes.inc' );
+require_once( 'updaters.inc' );
+
+define( 'MW_UPGRADE_COPY', false );
+define( 'MW_UPGRADE_ENCODE', true );
+define( 'MW_UPGRADE_NULL', null );
+define( 'MW_UPGRADE_CALLBACK', null ); // for self-documentation only
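+
+// The constants above are field-source markers for copyTable(). Note that
+// MW_UPGRADE_NULL and MW_UPGRADE_CALLBACK share the value null: callback
+// fields are first written as NULL, then filled in by the per-row callback.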
+
+class FiveUpgrade {
+ function FiveUpgrade() {
+ global $wgDatabase;
+ $this->conversionTables = $this->prepareWindows1252();
+
+ $this->dbw =& $this->newConnection();
+ $this->dbr =& $this->streamConnection();
+
+ $this->cleanupSwaps = array();
+ $this->emailAuth = false; # don't preauthenticate emails
+ $this->maxLag = 10; # if slaves are lagged more than 10 secs, wait
+ }
+
+ function doing( $step ) {
+ return is_null( $this->step ) || $step == $this->step;
+ }
+
+ function upgrade( $step ) {
+ $this->step = $step;
+
+ $tables = array(
+ 'page',
+ 'links',
+ 'user',
+ 'image',
+ 'oldimage',
+ 'watchlist',
+ 'logging',
+ 'archive',
+ 'imagelinks',
+ 'categorylinks',
+ 'ipblocks',
+ 'recentchanges',
+ 'querycache' );
+ foreach( $tables as $table ) {
+ if( $this->doing( $table ) ) {
+ $method = 'upgrade' . ucfirst( $table );
+ $this->$method();
+ }
+ }
+
+ if( $this->doing( 'cleanup' ) ) {
+ $this->upgradeCleanup();
+ }
+ }
+
+
+ /**
+	 * Open a connection to the master server with admin rights.
+ * @return Database
+ * @access private
+ */
+ function &newConnection() {
+ global $wgDBadminuser, $wgDBadminpassword;
+ global $wgDBserver, $wgDBname;
+ $db =& new Database( $wgDBserver, $wgDBadminuser, $wgDBadminpassword, $wgDBname );
+ return $db;
+ }
+
+ /**
+ * Open a second connection to the master server, with buffering off.
+ * This will let us stream large datasets in and write in chunks on the
+ * other end.
+ * @return Database
+ * @access private
+ */
+ function &streamConnection() {
+ $timeout = 3600 * 24;
+ $db =& $this->newConnection();
+ $db->bufferResults( false );
+ $db->query( "SET net_read_timeout=$timeout" );
+ $db->query( "SET net_write_timeout=$timeout" );
+ return $db;
+ }
+
+ /**
+ * Prepare a conversion array for converting Windows Code Page 1252 to
+ * UTF-8. This should provide proper conversion of text that was miscoded
+ * as Windows-1252 by naughty user-agents, and doesn't rely on an outside
+ * iconv library.
+ *
+ * @return array
+ * @access private
+ */
+ function prepareWindows1252() {
+ # Mappings from:
+ # http://www.unicode.org/Public/MAPPINGS/VENDORS/MICSFT/WINDOWS/CP1252.TXT
+ static $cp1252 = array(
+ 0x80 => 0x20AC, #EURO SIGN
+ 0x81 => UNICODE_REPLACEMENT,
+ 0x82 => 0x201A, #SINGLE LOW-9 QUOTATION MARK
+ 0x83 => 0x0192, #LATIN SMALL LETTER F WITH HOOK
+ 0x84 => 0x201E, #DOUBLE LOW-9 QUOTATION MARK
+ 0x85 => 0x2026, #HORIZONTAL ELLIPSIS
+ 0x86 => 0x2020, #DAGGER
+ 0x87 => 0x2021, #DOUBLE DAGGER
+ 0x88 => 0x02C6, #MODIFIER LETTER CIRCUMFLEX ACCENT
+ 0x89 => 0x2030, #PER MILLE SIGN
+ 0x8A => 0x0160, #LATIN CAPITAL LETTER S WITH CARON
+ 0x8B => 0x2039, #SINGLE LEFT-POINTING ANGLE QUOTATION MARK
+ 0x8C => 0x0152, #LATIN CAPITAL LIGATURE OE
+ 0x8D => UNICODE_REPLACEMENT,
+ 0x8E => 0x017D, #LATIN CAPITAL LETTER Z WITH CARON
+ 0x8F => UNICODE_REPLACEMENT,
+ 0x90 => UNICODE_REPLACEMENT,
+ 0x91 => 0x2018, #LEFT SINGLE QUOTATION MARK
+ 0x92 => 0x2019, #RIGHT SINGLE QUOTATION MARK
+ 0x93 => 0x201C, #LEFT DOUBLE QUOTATION MARK
+ 0x94 => 0x201D, #RIGHT DOUBLE QUOTATION MARK
+ 0x95 => 0x2022, #BULLET
+ 0x96 => 0x2013, #EN DASH
+ 0x97 => 0x2014, #EM DASH
+ 0x98 => 0x02DC, #SMALL TILDE
+ 0x99 => 0x2122, #TRADE MARK SIGN
+ 0x9A => 0x0161, #LATIN SMALL LETTER S WITH CARON
+ 0x9B => 0x203A, #SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
+ 0x9C => 0x0153, #LATIN SMALL LIGATURE OE
+ 0x9D => UNICODE_REPLACEMENT,
+ 0x9E => 0x017E, #LATIN SMALL LETTER Z WITH CARON
+ 0x9F => 0x0178, #LATIN CAPITAL LETTER Y WITH DIAERESIS
+ );
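+		# Bytes not listed above map straight through: the first 256 Unicode
+		# code points coincide with ISO-8859-1, so only 0x80-0x9F differ in CP1252.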
+ $pairs = array();
+ for( $i = 0; $i < 0x100; $i++ ) {
+ $unicode = isset( $cp1252[$i] ) ? $cp1252[$i] : $i;
+ $pairs[chr( $i )] = codepointToUtf8( $unicode );
+ }
+ return $pairs;
+ }
+
+ /**
+ * Convert from 8-bit Windows-1252 to UTF-8 if necessary.
+ * @param string $text
+ * @return string
+ * @access private
+ */
+ function conv( $text ) {
+ global $wgUseLatin1;
+ return is_null( $text )
+ ? null
+ : ( $wgUseLatin1
+ ? strtr( $text, $this->conversionTables )
+ : $text );
+ }
+
+ /**
+ * Dump timestamp and message to output
+ * @param string $message
+ * @access private
+ */
+ function log( $message ) {
+ global $wgDBname;
+ echo $wgDBname . ' ' . wfTimestamp( TS_DB ) . ': ' . $message . "\n";
+ flush();
+ }
+
+ /**
+ * Initialize the chunked-insert system.
+ * Rows will be inserted in chunks of the given number, rather
+	 * than in one giant INSERT...SELECT query, so that serialized
+	 * MySQL replication is not tied up by a single long-running
+	 * statement. Other work can then proceed during the conversion
+	 * without slaves falling far behind.
+ *
+ * @param int $chunksize Number of rows to insert at once
+ * @param int $final Total expected number of rows / id of last row,
+ * used for progress reports.
+	 * @param string $table Table to insert into
+ * @param string $fname function name to report in SQL
+ * @access private
+ */
+ function setChunkScale( $chunksize, $final, $table, $fname ) {
+ $this->chunkSize = $chunksize;
+ $this->chunkFinal = $final;
+ $this->chunkCount = 0;
+ $this->chunkStartTime = wfTime();
+ $this->chunkOptions = array( 'IGNORE' );
+ $this->chunkTable = $table;
+ $this->chunkFunction = $fname;
+ }
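+
+	// Typical use of the chunk helpers (a sketch; see upgradePage() below):
+	//
+	//     $this->setChunkScale( 100, $maxId, 'newtable', $fname );
+	//     while( $row = $this->dbr->fetchObject( $result ) ) {
+	//         $add[] = array( /* converted row */ );
+	//         $this->addChunk( $add ); // flushes once $chunksize rows are queued
+	//     }
+	//     $this->lastChunk( $add );    // flush the remainder and log 100%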
+
+ /**
+ * Chunked inserts: perform an insert if we've reached the chunk limit.
+ * Prints a progress report with estimated completion time.
+ * @param array &$chunk -- This will be emptied if an insert is done.
+ * @param int $key A key identifier to use in progress estimation in
+ * place of the number of rows inserted. Use this if
+ * you provided a max key number instead of a count
+ * as the final chunk number in setChunkScale()
+ * @access private
+ */
+ function addChunk( &$chunk, $key = null ) {
+ if( count( $chunk ) >= $this->chunkSize ) {
+ $this->insertChunk( $chunk );
+
+ $this->chunkCount += count( $chunk );
+ $now = wfTime();
+ $delta = $now - $this->chunkStartTime;
+ $rate = $this->chunkCount / $delta;
+
+ if( is_null( $key ) ) {
+ $completed = $this->chunkCount;
+ } else {
+ $completed = $key;
+ }
+ $portion = $completed / $this->chunkFinal;
+
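+			// Linear extrapolation: if $portion of the work took $delta
+			// seconds, the whole job should take $delta / $portion.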
+ $estimatedTotalTime = $delta / $portion;
+ $eta = $this->chunkStartTime + $estimatedTotalTime;
+
+ printf( "%s: %6.2f%% done on %s; ETA %s [%d/%d] %.2f/sec\n",
+ wfTimestamp( TS_DB, intval( $now ) ),
+ $portion * 100.0,
+ $this->chunkTable,
+ wfTimestamp( TS_DB, intval( $eta ) ),
+ $completed,
+ $this->chunkFinal,
+ $rate );
+ flush();
+
+ $chunk = array();
+ }
+ }
+
+ /**
+ * Chunked inserts: perform an insert unconditionally, at the end, and log.
+	 * @param array &$chunk Remaining rows; inserted if non-empty.
+ * @access private
+ */
+ function lastChunk( &$chunk ) {
+ $n = count( $chunk );
+ if( $n > 0 ) {
+ $this->insertChunk( $chunk );
+ }
+ $this->log( "100.00% done on $this->chunkTable (last chunk $n rows)." );
+ }
+
+ /**
+ * Chunked inserts: perform an insert.
+	 * @param array &$chunk Rows to insert; the caller clears it afterwards.
+ * @access private
+ */
+ function insertChunk( &$chunk ) {
+ // Give slaves a chance to catch up
+ wfWaitForSlaves( $this->maxLag );
+ $this->dbw->insert( $this->chunkTable, $chunk, $this->chunkFunction, $this->chunkOptions );
+ }
+
+
+ /**
+ * Copy and transcode a table to table_temp.
+ * @param string $name Base name of the source table
+ * @param string $tabledef CREATE TABLE definition, w/ $1 for the name
+	 * @param array $fields Map of destination field names to one of these constants:
+ * MW_UPGRADE_COPY - straight copy
+ * MW_UPGRADE_ENCODE - for old Latin1 wikis, conv to UTF-8
+ * MW_UPGRADE_NULL - just put NULL
+ * @param callable $callback An optional callback to modify the data
+ * or perform other processing. Func should be
+ * ( object $row, array $copy ) and return $copy
+ * @access private
+ */
+ function copyTable( $name, $tabledef, $fields, $callback = null ) {
+ $fname = 'FiveUpgrade::copyTable';
+
+ $name_temp = $name . '_temp';
+ $this->log( "Migrating $name table to $name_temp..." );
+
+ $table = $this->dbw->tableName( $name );
+ $table_temp = $this->dbw->tableName( $name_temp );
+
+ // Create temporary table; we're going to copy everything in there,
+ // then at the end rename the final tables into place.
+ $def = str_replace( '$1', $table_temp, $tabledef );
+ $this->dbw->query( $def, $fname );
+
+ $numRecords = $this->dbw->selectField( $name, 'COUNT(*)', '', $fname );
+ $this->setChunkScale( 100, $numRecords, $name_temp, $fname );
+
+ // Pull all records from the second, streaming database connection.
+ $sourceFields = array_keys( array_filter( $fields,
+ create_function( '$x', 'return $x !== MW_UPGRADE_NULL;' ) ) );
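+		// Fields that will be NULLed (or computed by the callback) are
+		// filtered out above; they need not be read from the source table.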
+ $result = $this->dbr->select( $name,
+ $sourceFields,
+ '',
+ $fname );
+
+ $add = array();
+ while( $row = $this->dbr->fetchObject( $result ) ) {
+ $copy = array();
+ foreach( $fields as $field => $source ) {
+ if( $source === MW_UPGRADE_COPY ) {
+ $copy[$field] = $row->$field;
+ } elseif( $source === MW_UPGRADE_ENCODE ) {
+ $copy[$field] = $this->conv( $row->$field );
+ } elseif( $source === MW_UPGRADE_NULL ) {
+ $copy[$field] = null;
+ } else {
+ $this->log( "Unknown field copy type: $field => $source" );
+ }
+ }
+ if( is_callable( $callback ) ) {
+ $copy = call_user_func( $callback, $row, $copy );
+ }
+ $add[] = $copy;
+ $this->addChunk( $add );
+ }
+ $this->lastChunk( $add );
+ $this->dbr->freeResult( $result );
+
+ $this->log( "Done converting $name." );
+ $this->cleanupSwaps[] = $name;
+ }
+
+ function upgradePage() {
+ $fname = "FiveUpgrade::upgradePage";
+ $chunksize = 100;
+
+ if( $this->dbw->tableExists( 'page' ) ) {
+ $this->log( 'Page table already exists; aborting.' );
+ die( -1 );
+ }
+
+ $this->log( "Checking cur table for unique title index and applying if necessary" );
+ checkDupes( true );
+
+ $this->log( "...converting from cur/old to page/revision/text DB structure." );
+
+ extract( $this->dbw->tableNames( 'cur', 'old', 'page', 'revision', 'text' ) );
+
+ $this->log( "Creating page and revision tables..." );
+ $this->dbw->query("CREATE TABLE $page (
+ page_id int(8) unsigned NOT NULL auto_increment,
+ page_namespace int NOT NULL,
+ page_title varchar(255) binary NOT NULL,
+ page_restrictions tinyblob NOT NULL default '',
+ page_counter bigint(20) unsigned NOT NULL default '0',
+ page_is_redirect tinyint(1) unsigned NOT NULL default '0',
+ page_is_new tinyint(1) unsigned NOT NULL default '0',
+ page_random real unsigned NOT NULL,
+ page_touched char(14) binary NOT NULL default '',
+ page_latest int(8) unsigned NOT NULL,
+ page_len int(8) unsigned NOT NULL,
+
+ PRIMARY KEY page_id (page_id),
+ UNIQUE INDEX name_title (page_namespace,page_title),
+ INDEX (page_random),
+ INDEX (page_len)
+ ) TYPE=InnoDB", $fname );
+ $this->dbw->query("CREATE TABLE $revision (
+ rev_id int(8) unsigned NOT NULL auto_increment,
+ rev_page int(8) unsigned NOT NULL,
+ rev_text_id int(8) unsigned NOT NULL,
+ rev_comment tinyblob NOT NULL default '',
+ rev_user int(5) unsigned NOT NULL default '0',
+ rev_user_text varchar(255) binary NOT NULL default '',
+ rev_timestamp char(14) binary NOT NULL default '',
+ rev_minor_edit tinyint(1) unsigned NOT NULL default '0',
+ rev_deleted tinyint(1) unsigned NOT NULL default '0',
+
+ PRIMARY KEY rev_page_id (rev_page, rev_id),
+ UNIQUE INDEX rev_id (rev_id),
+ INDEX rev_timestamp (rev_timestamp),
+ INDEX page_timestamp (rev_page,rev_timestamp),
+ INDEX user_timestamp (rev_user,rev_timestamp),
+ INDEX usertext_timestamp (rev_user_text,rev_timestamp)
+ ) TYPE=InnoDB", $fname );
+
+ $maxold = intval( $this->dbw->selectField( 'old', 'max(old_id)', '', $fname ) );
+ $this->log( "Last old record is {$maxold}" );
+
+ global $wgLegacySchemaConversion;
+ if( $wgLegacySchemaConversion ) {
+ // Create HistoryBlobCurStub entries.
+ // Text will be pulled from the leftover 'cur' table at runtime.
+ echo "......Moving metadata from cur; using blob references to text in cur table.\n";
+ $cur_text = "concat('O:18:\"historyblobcurstub\":1:{s:6:\"mCurId\";i:',cur_id,';}')";
+ $cur_flags = "'object'";
+ } else {
+ // Copy all cur text in immediately: this may take longer but avoids
+ // having to keep an extra table around.
+ echo "......Moving text from cur.\n";
+ $cur_text = 'cur_text';
+ $cur_flags = "''";
+ }
+
+ $maxcur = $this->dbw->selectField( 'cur', 'max(cur_id)', '', $fname );
+ $this->log( "Last cur entry is $maxcur" );
+
+ /**
+ * Copy placeholder records for each page's current version into old
+ * Don't do any conversion here; text records are converted at runtime
+ * based on the flags (and may be originally binary!) while the meta
+ * fields will be converted in the old -> rev and cur -> page steps.
+ */
+ $this->setChunkScale( $chunksize, $maxcur, 'old', $fname );
+ $result = $this->dbr->query(
+ "SELECT cur_id, cur_namespace, cur_title, $cur_text AS text, cur_comment,
+ cur_user, cur_user_text, cur_timestamp, cur_minor_edit, $cur_flags AS flags
+ FROM $cur
+ ORDER BY cur_id", $fname );
+ $add = array();
+ while( $row = $this->dbr->fetchObject( $result ) ) {
+ $add[] = array(
+ 'old_namespace' => $row->cur_namespace,
+ 'old_title' => $row->cur_title,
+ 'old_text' => $row->text,
+ 'old_comment' => $row->cur_comment,
+ 'old_user' => $row->cur_user,
+ 'old_user_text' => $row->cur_user_text,
+ 'old_timestamp' => $row->cur_timestamp,
+ 'old_minor_edit' => $row->cur_minor_edit,
+ 'old_flags' => $row->flags );
+ $this->addChunk( $add, $row->cur_id );
+ }
+ $this->lastChunk( $add );
+ $this->dbr->freeResult( $result );
+
+ /**
+ * Copy revision metadata from old into revision.
+ * We'll also do UTF-8 conversion of usernames and comments.
+ */
+ #$newmaxold = $this->dbw->selectField( 'old', 'max(old_id)', '', $fname );
+ #$this->setChunkScale( $chunksize, $newmaxold, 'revision', $fname );
+ #$countold = $this->dbw->selectField( 'old', 'count(old_id)', '', $fname );
+ $countold = $this->dbw->selectField( 'old', 'max(old_id)', '', $fname );
+ $this->setChunkScale( $chunksize, $countold, 'revision', $fname );
+
+ $this->log( "......Setting up revision table." );
+ $result = $this->dbr->query(
+ "SELECT old_id, cur_id, old_comment, old_user, old_user_text,
+ old_timestamp, old_minor_edit
+ FROM $old,$cur WHERE old_namespace=cur_namespace AND old_title=cur_title",
+ $fname );
+
+ $add = array();
+ while( $row = $this->dbr->fetchObject( $result ) ) {
+ $add[] = array(
+ 'rev_id' => $row->old_id,
+ 'rev_page' => $row->cur_id,
+ 'rev_text_id' => $row->old_id,
+ 'rev_comment' => $this->conv( $row->old_comment ),
+ 'rev_user' => $row->old_user,
+ 'rev_user_text' => $this->conv( $row->old_user_text ),
+ 'rev_timestamp' => $row->old_timestamp,
+ 'rev_minor_edit' => $row->old_minor_edit );
+ $this->addChunk( $add );
+ }
+ $this->lastChunk( $add );
+ $this->dbr->freeResult( $result );
+
+
+ /**
+ * Copy page metadata from cur into page.
+ * We'll also do UTF-8 conversion of titles.
+ */
+ $this->log( "......Setting up page table." );
+ $this->setChunkScale( $chunksize, $maxcur, 'page', $fname );
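+		// The rev_id > $maxold condition limits the join to the placeholder
+		// revisions created above, one per cur row, so each page row picks
+		// up the rev_id of its current revision as page_latest.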
+ $result = $this->dbr->query( "
+ SELECT cur_id, cur_namespace, cur_title, cur_restrictions, cur_counter, cur_is_redirect, cur_is_new,
+ cur_random, cur_touched, rev_id, LENGTH(cur_text) AS len
+ FROM $cur,$revision
+ WHERE cur_id=rev_page AND rev_timestamp=cur_timestamp AND rev_id > {$maxold}
+ ORDER BY cur_id", $fname );
+ $add = array();
+ while( $row = $this->dbr->fetchObject( $result ) ) {
+ $add[] = array(
+ 'page_id' => $row->cur_id,
+ 'page_namespace' => $row->cur_namespace,
+ 'page_title' => $this->conv( $row->cur_title ),
+ 'page_restrictions' => $row->cur_restrictions,
+ 'page_counter' => $row->cur_counter,
+ 'page_is_redirect' => $row->cur_is_redirect,
+ 'page_is_new' => $row->cur_is_new,
+ 'page_random' => $row->cur_random,
+ 'page_touched' => $this->dbw->timestamp(),
+ 'page_latest' => $row->rev_id,
+ 'page_len' => $row->len );
+ #$this->addChunk( $add, $row->cur_id );
+ $this->addChunk( $add );
+ }
+ $this->lastChunk( $add );
+ $this->dbr->freeResult( $result );
+
+ $this->log( "...done with cur/old -> page/revision." );
+ }
+
+ function upgradeLinks() {
+ $fname = 'FiveUpgrade::upgradeLinks';
+ $chunksize = 200;
+ extract( $this->dbw->tableNames( 'links', 'brokenlinks', 'pagelinks', 'cur' ) );
+
+ $this->log( 'Checking for interwiki table change in case of bogus items...' );
+ if( $this->dbw->fieldExists( 'interwiki', 'iw_trans' ) ) {
+ $this->log( 'interwiki has iw_trans.' );
+ } else {
+ $this->log( 'adding iw_trans...' );
+ dbsource( 'maintenance/archives/patch-interwiki-trans.sql', $this->dbw );
+ $this->log( 'added iw_trans.' );
+ }
+
+ $this->log( 'Creating pagelinks table...' );
+ $this->dbw->query( "
+CREATE TABLE $pagelinks (
+ -- Key to the page_id of the page containing the link.
+ pl_from int(8) unsigned NOT NULL default '0',
+
+ -- Key to page_namespace/page_title of the target page.
+ -- The target page may or may not exist, and due to renames
+ -- and deletions may refer to different page records as time
+ -- goes by.
+ pl_namespace int NOT NULL default '0',
+ pl_title varchar(255) binary NOT NULL default '',
+
+ UNIQUE KEY pl_from(pl_from,pl_namespace,pl_title),
+ KEY (pl_namespace,pl_title)
+
+) TYPE=InnoDB" );
+
+ $this->log( 'Importing live links -> pagelinks' );
+ $nlinks = $this->dbw->selectField( 'links', 'count(*)', '', $fname );
+ if( $nlinks ) {
+ $this->setChunkScale( $chunksize, $nlinks, 'pagelinks', $fname );
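+			// The old links table stores targets by cur_id, so join against
+			// cur to recover the namespace/title pair that pagelinks wants.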
+ $result = $this->dbr->query( "
+ SELECT l_from,cur_namespace,cur_title
+ FROM $links, $cur
+ WHERE l_to=cur_id", $fname );
+ $add = array();
+ while( $row = $this->dbr->fetchObject( $result ) ) {
+ $add[] = array(
+ 'pl_from' => $row->l_from,
+ 'pl_namespace' => $row->cur_namespace,
+ 'pl_title' => $this->conv( $row->cur_title ) );
+ $this->addChunk( $add );
+ }
+ $this->lastChunk( $add );
+ } else {
+ $this->log( 'no links!' );
+ }
+
+ $this->log( 'Importing brokenlinks -> pagelinks' );
+ $nbrokenlinks = $this->dbw->selectField( 'brokenlinks', 'count(*)', '', $fname );
+ if( $nbrokenlinks ) {
+ $this->setChunkScale( $chunksize, $nbrokenlinks, 'pagelinks', $fname );
+ $result = $this->dbr->query(
+ "SELECT bl_from, bl_to FROM $brokenlinks",
+ $fname );
+ $add = array();
+ while( $row = $this->dbr->fetchObject( $result ) ) {
+ $pagename = $this->conv( $row->bl_to );
+ $title = Title::newFromText( $pagename );
+ if( is_null( $title ) ) {
+ $this->log( "** invalid brokenlink: $row->bl_from -> '$pagename' (converted from '$row->bl_to')" );
+ } else {
+ $add[] = array(
+ 'pl_from' => $row->bl_from,
+ 'pl_namespace' => $title->getNamespace(),
+ 'pl_title' => $title->getDBkey() );
+ $this->addChunk( $add );
+ }
+ }
+ $this->lastChunk( $add );
+ } else {
+ $this->log( 'no brokenlinks!' );
+ }
+
+ $this->log( 'Done with links.' );
+ }
+
+ function upgradeUser() {
+ // Apply unique index, if necessary:
+ $duper = new UserDupes( $this->dbw );
+ if( $duper->hasUniqueIndex() ) {
+ $this->log( "Already have unique user_name index." );
+ } else {
+ $this->log( "Clearing user duplicates..." );
+ if( !$duper->clearDupes() ) {
+ $this->log( "WARNING: Duplicate user accounts, may explode!" );
+ }
+ }
+
+ $tabledef = <<<END
+CREATE TABLE $1 (
+ user_id int(5) unsigned NOT NULL auto_increment,
+ user_name varchar(255) binary NOT NULL default '',
+ user_real_name varchar(255) binary NOT NULL default '',
+ user_password tinyblob NOT NULL default '',
+ user_newpassword tinyblob NOT NULL default '',
+ user_email tinytext NOT NULL default '',
+ user_options blob NOT NULL default '',
+ user_touched char(14) binary NOT NULL default '',
+ user_token char(32) binary NOT NULL default '',
+ user_email_authenticated CHAR(14) BINARY,
+ user_email_token CHAR(32) BINARY,
+ user_email_token_expires CHAR(14) BINARY,
+
+ PRIMARY KEY user_id (user_id),
+ UNIQUE INDEX user_name (user_name),
+ INDEX (user_email_token)
+
+) TYPE=InnoDB
+END;
+ $fields = array(
+ 'user_id' => MW_UPGRADE_COPY,
+ 'user_name' => MW_UPGRADE_ENCODE,
+ 'user_real_name' => MW_UPGRADE_ENCODE,
+ 'user_password' => MW_UPGRADE_COPY,
+ 'user_newpassword' => MW_UPGRADE_COPY,
+ 'user_email' => MW_UPGRADE_ENCODE,
+ 'user_options' => MW_UPGRADE_ENCODE,
+ 'user_touched' => MW_UPGRADE_CALLBACK,
+ 'user_token' => MW_UPGRADE_COPY,
+ 'user_email_authenticated' => MW_UPGRADE_CALLBACK,
+ 'user_email_token' => MW_UPGRADE_NULL,
+ 'user_email_token_expires' => MW_UPGRADE_NULL );
+ $this->copyTable( 'user', $tabledef, $fields,
+ array( &$this, 'userCallback' ) );
+ }
+
+ function userCallback( $row, $copy ) {
+ $now = $this->dbw->timestamp();
+ $copy['user_touched'] = $now;
+ $copy['user_email_authenticated'] = $this->emailAuth ? $now : null;
+ return $copy;
+ }
+
+ function upgradeImage() {
+ $tabledef = <<<END
+CREATE TABLE $1 (
+ img_name varchar(255) binary NOT NULL default '',
+ img_size int(8) unsigned NOT NULL default '0',
+ img_width int(5) NOT NULL default '0',
+ img_height int(5) NOT NULL default '0',
+ img_metadata mediumblob NOT NULL,
+ img_bits int(3) NOT NULL default '0',
+ img_media_type ENUM("UNKNOWN", "BITMAP", "DRAWING", "AUDIO", "VIDEO", "MULTIMEDIA", "OFFICE", "TEXT", "EXECUTABLE", "ARCHIVE") default NULL,
+ img_major_mime ENUM("unknown", "application", "audio", "image", "text", "video", "message", "model", "multipart") NOT NULL default "unknown",
+ img_minor_mime varchar(32) NOT NULL default "unknown",
+ img_description tinyblob NOT NULL default '',
+ img_user int(5) unsigned NOT NULL default '0',
+ img_user_text varchar(255) binary NOT NULL default '',
+ img_timestamp char(14) binary NOT NULL default '',
+
+ PRIMARY KEY img_name (img_name),
+ INDEX img_size (img_size),
+ INDEX img_timestamp (img_timestamp)
+) TYPE=InnoDB
+END;
+ $fields = array(
+ 'img_name' => MW_UPGRADE_ENCODE,
+ 'img_size' => MW_UPGRADE_COPY,
+ 'img_width' => MW_UPGRADE_CALLBACK,
+ 'img_height' => MW_UPGRADE_CALLBACK,
+ 'img_metadata' => MW_UPGRADE_CALLBACK,
+ 'img_bits' => MW_UPGRADE_CALLBACK,
+ 'img_media_type' => MW_UPGRADE_CALLBACK,
+ 'img_major_mime' => MW_UPGRADE_CALLBACK,
+ 'img_minor_mime' => MW_UPGRADE_CALLBACK,
+ 'img_description' => MW_UPGRADE_ENCODE,
+ 'img_user' => MW_UPGRADE_COPY,
+ 'img_user_text' => MW_UPGRADE_ENCODE,
+ 'img_timestamp' => MW_UPGRADE_COPY );
+ $this->copyTable( 'image', $tabledef, $fields,
+ array( &$this, 'imageCallback' ) );
+ }
+
+ function imageCallback( $row, $copy ) {
+ global $options;
+ if( !isset( $options['noimage'] ) ) {
+ // Fill in the new image info fields
+ $info = $this->imageInfo( $row->img_name );
+
+ $copy['img_width' ] = $info['width'];
+ $copy['img_height' ] = $info['height'];
+ $copy['img_metadata' ] = ""; // loaded on-demand
+ $copy['img_bits' ] = $info['bits'];
+ $copy['img_media_type'] = $info['media'];
+ $copy['img_major_mime'] = $info['major'];
+ $copy['img_minor_mime'] = $info['minor'];
+ }
+
+ // If doing UTF8 conversion the file must be renamed
+ $this->renameFile( $row->img_name, 'wfImageDir' );
+
+ return $copy;
+ }
+
+ function imageInfo( $name, $subdirCallback='wfImageDir', $basename = null ) {
+ if( is_null( $basename ) ) $basename = $name;
+ $dir = call_user_func( $subdirCallback, $basename );
+ $filename = $dir . '/' . $name;
+ $info = array(
+ 'width' => 0,
+ 'height' => 0,
+ 'bits' => 0,
+ 'media' => '',
+ 'major' => '',
+ 'minor' => '' );
+
+ $magic =& wfGetMimeMagic();
+ $mime = $magic->guessMimeType( $filename, true );
+ list( $info['major'], $info['minor'] ) = explode( '/', $mime );
+
+ $info['media'] = $magic->getMediaType( $filename, $mime );
+
+ # Height and width
+ $gis = false;
+ if( $mime == 'image/svg' ) {
+ $gis = wfGetSVGsize( $filename );
+ } elseif( $magic->isPHPImageType( $mime ) ) {
+ $gis = getimagesize( $filename );
+ } else {
+ $this->log( "Surprising mime type: $mime" );
+ }
+ if( $gis ) {
+ $info['width' ] = $gis[0];
+ $info['height'] = $gis[1];
+ }
+ if( isset( $gis['bits'] ) ) {
+ $info['bits'] = $gis['bits'];
+ }
+
+ return $info;
+ }
+
+
+ /**
+ * Truncate a table.
+ * @param string $table The table name to be truncated
+ */
+ function clearTable( $table ) {
+ print "Clearing $table...\n";
+		$tableName = $this->dbw->tableName( $table );
+		$this->dbw->query( "TRUNCATE $tableName" );
+ }
+
+ /**
+ * Rename a given image or archived image file to the converted filename,
+ * leaving a symlink for URL compatibility.
+ *
+	 * @param string $oldname Pre-conversion filename
+	 * @param string $subdirCallback Name of the function mapping a base filename to its hashed directory
+	 * @param string $basename Pre-conversion base filename for dir hashing, if an archive
+ * @access private
+ */
+ function renameFile( $oldname, $subdirCallback='wfImageDir', $basename=null ) {
+ $newname = $this->conv( $oldname );
+ if( $newname == $oldname ) {
+ // No need to rename; another field triggered this row.
+ return false;
+ }
+
+ if( is_null( $basename ) ) $basename = $oldname;
+ $ubasename = $this->conv( $basename );
+ $oldpath = call_user_func( $subdirCallback, $basename ) . '/' . $oldname;
+ $newpath = call_user_func( $subdirCallback, $ubasename ) . '/' . $newname;
+
+ $this->log( "$oldpath -> $newpath" );
+ if( rename( $oldpath, $newpath ) ) {
+ $relpath = $this->relativize( $newpath, dirname( $oldpath ) );
+ if( !symlink( $relpath, $oldpath ) ) {
+ $this->log( "... symlink failed!" );
+ }
+ return $newname;
+ } else {
+ $this->log( "... rename failed!" );
+ return false;
+ }
+ }
+
+ /**
+ * Generate a relative path name to the given file.
+ * Assumes Unix-style paths, separators, and semantics.
+ *
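+	 * For example, relativize( '/a/b/c/foo.png', '/a/d' ) returns
+	 * '../b/c/foo.png'.
+	 *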
+ * @param string $path Absolute destination path including target filename
+ * @param string $from Absolute source path, directory only
+ * @return string
+ * @access private
+ * @static
+ */
+ function relativize( $path, $from ) {
+ $pieces = explode( '/', dirname( $path ) );
+ $against = explode( '/', $from );
+
+ // Trim off common prefix
+ while( count( $pieces ) && count( $against )
+ && $pieces[0] == $against[0] ) {
+ array_shift( $pieces );
+ array_shift( $against );
+ }
+
+ // relative dots to bump us to the parent
+ while( count( $against ) ) {
+ array_unshift( $pieces, '..' );
+ array_shift( $against );
+ }
+
+ array_push( $pieces, basename( $path ) );
+
+ return implode( '/', $pieces );
+ }
+
+ function upgradeOldImage() {
+ $tabledef = <<<END
+CREATE TABLE $1 (
+ -- Base filename: key to image.img_name
+ oi_name varchar(255) binary NOT NULL default '',
+
+ -- Filename of the archived file.
+ -- This is generally a timestamp and '!' prepended to the base name.
+ oi_archive_name varchar(255) binary NOT NULL default '',
+
+ -- Other fields as in image...
+ oi_size int(8) unsigned NOT NULL default 0,
+ oi_width int(5) NOT NULL default 0,
+ oi_height int(5) NOT NULL default 0,
+ oi_bits int(3) NOT NULL default 0,
+ oi_description tinyblob NOT NULL default '',
+ oi_user int(5) unsigned NOT NULL default '0',
+ oi_user_text varchar(255) binary NOT NULL default '',
+ oi_timestamp char(14) binary NOT NULL default '',
+
+ INDEX oi_name (oi_name(10))
+
+) TYPE=InnoDB;
+END;
+ $fields = array(
+ 'oi_name' => MW_UPGRADE_ENCODE,
+ 'oi_archive_name' => MW_UPGRADE_ENCODE,
+ 'oi_size' => MW_UPGRADE_COPY,
+ 'oi_width' => MW_UPGRADE_CALLBACK,
+ 'oi_height' => MW_UPGRADE_CALLBACK,
+ 'oi_bits' => MW_UPGRADE_CALLBACK,
+ 'oi_description' => MW_UPGRADE_ENCODE,
+ 'oi_user' => MW_UPGRADE_COPY,
+ 'oi_user_text' => MW_UPGRADE_ENCODE,
+ 'oi_timestamp' => MW_UPGRADE_COPY );
+ $this->copyTable( 'oldimage', $tabledef, $fields,
+ array( &$this, 'oldimageCallback' ) );
+ }
+
+ function oldimageCallback( $row, $copy ) {
+ global $options;
+ if( !isset( $options['noimage'] ) ) {
+ // Fill in the new image info fields
+ $info = $this->imageInfo( $row->oi_archive_name, 'wfImageArchiveDir', $row->oi_name );
+ $copy['oi_width' ] = $info['width' ];
+ $copy['oi_height'] = $info['height'];
+ $copy['oi_bits' ] = $info['bits' ];
+ }
+
+ // If doing UTF8 conversion the file must be renamed
+ $this->renameFile( $row->oi_archive_name, 'wfImageArchiveDir', $row->oi_name );
+
+ return $copy;
+ }
+
+
+ function upgradeWatchlist() {
+ $fname = 'FiveUpgrade::upgradeWatchlist';
+ $chunksize = 100;
+
+ extract( $this->dbw->tableNames( 'watchlist', 'watchlist_temp' ) );
+
+ $this->log( 'Migrating watchlist table to watchlist_temp...' );
+ $this->dbw->query(
+"CREATE TABLE $watchlist_temp (
+ -- Key to user_id
+ wl_user int(5) unsigned NOT NULL,
+
+ -- Key to page_namespace/page_title
+  -- Note that users may watch pages which do not exist yet,
+ -- or existed in the past but have been deleted.
+ wl_namespace int NOT NULL default '0',
+ wl_title varchar(255) binary NOT NULL default '',
+
+ -- Timestamp when user was last sent a notification e-mail;
+ -- cleared when the user visits the page.
+ -- FIXME: add proper null support etc
+ wl_notificationtimestamp varchar(14) binary NOT NULL default '0',
+
+ UNIQUE KEY (wl_user, wl_namespace, wl_title),
+ KEY namespace_title (wl_namespace,wl_title)
+
+) TYPE=InnoDB;", $fname );
+
+ // Fix encoding for Latin-1 upgrades, add some fields,
+ // and double article to article+talk pairs
+ $numwatched = $this->dbw->selectField( 'watchlist', 'count(*)', '', $fname );
+
+ $this->setChunkScale( $chunksize, $numwatched * 2, 'watchlist_temp', $fname );
+ $result = $this->dbr->select( 'watchlist',
+ array(
+ 'wl_user',
+ 'wl_namespace',
+ 'wl_title' ),
+ '',
+ $fname );
+
+ $add = array();
+ while( $row = $this->dbr->fetchObject( $result ) ) {
+ $now = $this->dbw->timestamp();
+ $add[] = array(
+ 'wl_user' => $row->wl_user,
+ 'wl_namespace' => Namespace::getSubject( $row->wl_namespace ),
+ 'wl_title' => $this->conv( $row->wl_title ),
+ 'wl_notificationtimestamp' => '0' );
+ $this->addChunk( $add );
+
+ $add[] = array(
+ 'wl_user' => $row->wl_user,
+ 'wl_namespace' => Namespace::getTalk( $row->wl_namespace ),
+ 'wl_title' => $this->conv( $row->wl_title ),
+ 'wl_notificationtimestamp' => '0' );
+ $this->addChunk( $add );
+ }
+ $this->lastChunk( $add );
+ $this->dbr->freeResult( $result );
+
+ $this->log( 'Done converting watchlist.' );
+ $this->cleanupSwaps[] = 'watchlist';
+ }
+
+ function upgradeLogging() {
+ $tabledef = <<<END
+CREATE TABLE $1 (
+ -- Symbolic keys for the general log type and the action type
+ -- within the log. The output format will be controlled by the
+ -- action field, but only the type controls categorization.
+ log_type char(10) NOT NULL default '',
+ log_action char(10) NOT NULL default '',
+
+ -- Timestamp. Duh.
+ log_timestamp char(14) NOT NULL default '19700101000000',
+
+ -- The user who performed this action; key to user_id
+ log_user int unsigned NOT NULL default 0,
+
+ -- Key to the page affected. Where a user is the target,
+ -- this will point to the user page.
+ log_namespace int NOT NULL default 0,
+ log_title varchar(255) binary NOT NULL default '',
+
+ -- Freeform text. Interpreted as edit history comments.
+ log_comment varchar(255) NOT NULL default '',
+
+ -- LF separated list of miscellaneous parameters
+ log_params blob NOT NULL default '',
+
+ KEY type_time (log_type, log_timestamp),
+ KEY user_time (log_user, log_timestamp),
+ KEY page_time (log_namespace, log_title, log_timestamp)
+
+) TYPE=InnoDB
+END;
+ $fields = array(
+ 'log_type' => MW_UPGRADE_COPY,
+ 'log_action' => MW_UPGRADE_COPY,
+ 'log_timestamp' => MW_UPGRADE_COPY,
+ 'log_user' => MW_UPGRADE_COPY,
+ 'log_namespace' => MW_UPGRADE_COPY,
+ 'log_title' => MW_UPGRADE_ENCODE,
+ 'log_comment' => MW_UPGRADE_ENCODE,
+ 'log_params' => MW_UPGRADE_ENCODE );
+ $this->copyTable( 'logging', $tabledef, $fields );
+ }
+
+ function upgradeArchive() {
+ $tabledef = <<<END
+CREATE TABLE $1 (
+ ar_namespace int NOT NULL default '0',
+ ar_title varchar(255) binary NOT NULL default '',
+ ar_text mediumblob NOT NULL default '',
+
+ ar_comment tinyblob NOT NULL default '',
+ ar_user int(5) unsigned NOT NULL default '0',
+ ar_user_text varchar(255) binary NOT NULL,
+ ar_timestamp char(14) binary NOT NULL default '',
+ ar_minor_edit tinyint(1) NOT NULL default '0',
+
+ ar_flags tinyblob NOT NULL default '',
+
+ ar_rev_id int(8) unsigned,
+ ar_text_id int(8) unsigned,
+
+ KEY name_title_timestamp (ar_namespace,ar_title,ar_timestamp)
+
+) TYPE=InnoDB
+END;
+ $fields = array(
+ 'ar_namespace' => MW_UPGRADE_COPY,
+ 'ar_title' => MW_UPGRADE_ENCODE,
+ 'ar_text' => MW_UPGRADE_COPY,
+ 'ar_comment' => MW_UPGRADE_ENCODE,
+ 'ar_user' => MW_UPGRADE_COPY,
+ 'ar_user_text' => MW_UPGRADE_ENCODE,
+ 'ar_timestamp' => MW_UPGRADE_COPY,
+ 'ar_minor_edit' => MW_UPGRADE_COPY,
+ 'ar_flags' => MW_UPGRADE_COPY,
+ 'ar_rev_id' => MW_UPGRADE_NULL,
+ 'ar_text_id' => MW_UPGRADE_NULL );
+ $this->copyTable( 'archive', $tabledef, $fields );
+ }
+
+ function upgradeImagelinks() {
+ global $wgUseLatin1;
+ if( $wgUseLatin1 ) {
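+			// Only needed for Latin-1 wikis: the schema is unchanged, this
+			// pass just re-encodes the text fields to UTF-8.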
+ $tabledef = <<<END
+CREATE TABLE $1 (
+ -- Key to page_id of the page containing the image / media link.
+ il_from int(8) unsigned NOT NULL default '0',
+
+ -- Filename of target image.
+ -- This is also the page_title of the file's description page;
+ -- all such pages are in namespace 6 (NS_IMAGE).
+ il_to varchar(255) binary NOT NULL default '',
+
+ UNIQUE KEY il_from(il_from,il_to),
+ KEY (il_to)
+
+) TYPE=InnoDB
+END;
+ $fields = array(
+ 'il_from' => MW_UPGRADE_COPY,
+ 'il_to' => MW_UPGRADE_ENCODE );
+ $this->copyTable( 'imagelinks', $tabledef, $fields );
+ }
+ }
+
+ function upgradeCategorylinks() {
+ global $wgUseLatin1;
+ if( $wgUseLatin1 ) {
+ $tabledef = <<<END
+CREATE TABLE $1 (
+ cl_from int(8) unsigned NOT NULL default '0',
+ cl_to varchar(255) binary NOT NULL default '',
+ cl_sortkey varchar(86) binary NOT NULL default '',
+ cl_timestamp timestamp NOT NULL,
+
+ UNIQUE KEY cl_from(cl_from,cl_to),
+ KEY cl_sortkey(cl_to,cl_sortkey),
+ KEY cl_timestamp(cl_to,cl_timestamp)
+) TYPE=InnoDB
+END;
+ $fields = array(
+ 'cl_from' => MW_UPGRADE_COPY,
+ 'cl_to' => MW_UPGRADE_ENCODE,
+ 'cl_sortkey' => MW_UPGRADE_ENCODE,
+ 'cl_timestamp' => MW_UPGRADE_COPY );
+ $this->copyTable( 'categorylinks', $tabledef, $fields );
+ }
+ }
+
+ function upgradeIpblocks() {
+ global $wgUseLatin1;
+ if( $wgUseLatin1 ) {
+ $tabledef = <<<END
+CREATE TABLE $1 (
+ ipb_id int(8) NOT NULL auto_increment,
+ ipb_address varchar(40) binary NOT NULL default '',
+ ipb_user int(8) unsigned NOT NULL default '0',
+ ipb_by int(8) unsigned NOT NULL default '0',
+ ipb_reason tinyblob NOT NULL default '',
+ ipb_timestamp char(14) binary NOT NULL default '',
+ ipb_auto tinyint(1) NOT NULL default '0',
+ ipb_expiry char(14) binary NOT NULL default '',
+
+ PRIMARY KEY ipb_id (ipb_id),
+ INDEX ipb_address (ipb_address),
+ INDEX ipb_user (ipb_user)
+
+) TYPE=InnoDB
+END;
+ $fields = array(
+ 'ipb_id' => MW_UPGRADE_COPY,
+ 'ipb_address' => MW_UPGRADE_COPY,
+ 'ipb_user' => MW_UPGRADE_COPY,
+ 'ipb_by' => MW_UPGRADE_COPY,
+ 'ipb_reason' => MW_UPGRADE_ENCODE,
+ 'ipb_timestamp' => MW_UPGRADE_COPY,
+ 'ipb_auto' => MW_UPGRADE_COPY,
+ 'ipb_expiry' => MW_UPGRADE_COPY );
+ $this->copyTable( 'ipblocks', $tabledef, $fields );
+ }
+ }
+
+ function upgradeRecentchanges() {
+ // There's a format change in the namespace field
+ $tabledef = <<<END
+CREATE TABLE $1 (
+ rc_id int(8) NOT NULL auto_increment,
+ rc_timestamp varchar(14) binary NOT NULL default '',
+ rc_cur_time varchar(14) binary NOT NULL default '',
+
+ rc_user int(10) unsigned NOT NULL default '0',
+ rc_user_text varchar(255) binary NOT NULL default '',
+
+ rc_namespace int NOT NULL default '0',
+ rc_title varchar(255) binary NOT NULL default '',
+
+ rc_comment varchar(255) binary NOT NULL default '',
+ rc_minor tinyint(3) unsigned NOT NULL default '0',
+
+ rc_bot tinyint(3) unsigned NOT NULL default '0',
+ rc_new tinyint(3) unsigned NOT NULL default '0',
+
+ rc_cur_id int(10) unsigned NOT NULL default '0',
+ rc_this_oldid int(10) unsigned NOT NULL default '0',
+ rc_last_oldid int(10) unsigned NOT NULL default '0',
+
+ rc_type tinyint(3) unsigned NOT NULL default '0',
+ rc_moved_to_ns tinyint(3) unsigned NOT NULL default '0',
+ rc_moved_to_title varchar(255) binary NOT NULL default '',
+
+ rc_patrolled tinyint(3) unsigned NOT NULL default '0',
+
+ rc_ip char(15) NOT NULL default '',
+
+ PRIMARY KEY rc_id (rc_id),
+ INDEX rc_timestamp (rc_timestamp),
+ INDEX rc_namespace_title (rc_namespace, rc_title),
+ INDEX rc_cur_id (rc_cur_id),
+ INDEX new_name_timestamp(rc_new,rc_namespace,rc_timestamp),
+ INDEX rc_ip (rc_ip)
+
+) TYPE=InnoDB
+END;
+ $fields = array(
+ 'rc_id' => MW_UPGRADE_COPY,
+ 'rc_timestamp' => MW_UPGRADE_COPY,
+ 'rc_cur_time' => MW_UPGRADE_COPY,
+ 'rc_user' => MW_UPGRADE_COPY,
+ 'rc_user_text' => MW_UPGRADE_ENCODE,
+ 'rc_namespace' => MW_UPGRADE_COPY,
+ 'rc_title' => MW_UPGRADE_ENCODE,
+ 'rc_comment' => MW_UPGRADE_ENCODE,
+ 'rc_minor' => MW_UPGRADE_COPY,
+ 'rc_bot' => MW_UPGRADE_COPY,
+ 'rc_new' => MW_UPGRADE_COPY,
+ 'rc_cur_id' => MW_UPGRADE_COPY,
+ 'rc_this_oldid' => MW_UPGRADE_COPY,
+ 'rc_last_oldid' => MW_UPGRADE_COPY,
+ 'rc_type' => MW_UPGRADE_COPY,
+ 'rc_moved_to_ns' => MW_UPGRADE_COPY,
+ 'rc_moved_to_title' => MW_UPGRADE_ENCODE,
+ 'rc_patrolled' => MW_UPGRADE_COPY,
+ 'rc_ip' => MW_UPGRADE_COPY );
+ $this->copyTable( 'recentchanges', $tabledef, $fields );
+ }
+
+ function upgradeQuerycache() {
+ // There's a format change in the namespace field
+ $tabledef = <<<END
+CREATE TABLE $1 (
+ -- A key name, generally the base name of the special page.
+ qc_type char(32) NOT NULL,
+
+ -- Some sort of stored value. Sizes, counts...
+ qc_value int(5) unsigned NOT NULL default '0',
+
+ -- Target namespace+title
+ qc_namespace int NOT NULL default '0',
+ qc_title char(255) binary NOT NULL default '',
+
+ KEY (qc_type,qc_value)
+
+) TYPE=InnoDB
+END;
+ $fields = array(
+ 'qc_type' => MW_UPGRADE_COPY,
+ 'qc_value' => MW_UPGRADE_COPY,
+ 'qc_namespace' => MW_UPGRADE_COPY,
+ 'qc_title' => MW_UPGRADE_ENCODE );
+ $this->copyTable( 'querycache', $tabledef, $fields );
+ }
+
+ /**
+ * Rename all our temporary tables into final place.
+ * We've left things in place so a read-only wiki can continue running
+ * on the old code during all this.
+ */
+ function upgradeCleanup() {
+ $this->renameTable( 'old', 'text' );
+
+ foreach( $this->cleanupSwaps as $table ) {
+ $this->swap( $table );
+ }
+ }
+
+ function renameTable( $from, $to ) {
+ $this->log( "Renaming $from to $to..." );
+
+ $fromtable = $this->dbw->tableName( $from );
+ $totable = $this->dbw->tableName( $to );
+ $this->dbw->query( "ALTER TABLE $fromtable RENAME TO $totable" );
+ }
+
+ function swap( $base ) {
+ $this->renameTable( $base, "{$base}_old" );
+ $this->renameTable( "{$base}_temp", $base );
+ }
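+
+	// For illustration: swap( 'categorylinks' ) issues, via renameTable()
+	// (table prefix aside):
+	//   ALTER TABLE categorylinks RENAME TO categorylinks_old
+	//   ALTER TABLE categorylinks_temp RENAME TO categorylinks
+	// so the freshly built temp table takes over while the old data
+	// survives under the _old suffix.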
+
+}
+
+?>
diff --git a/maintenance/InitialiseMessages.inc b/maintenance/InitialiseMessages.inc
new file mode 100644
index 00000000..189fbd25
--- /dev/null
+++ b/maintenance/InitialiseMessages.inc
@@ -0,0 +1,240 @@
+<?php
+/**
+ * Script to initialise the MediaWiki namespace
+ *
+ * This script is included from update.php and install.php. Do not run it
+ * by itself.
+ *
+ * @deprecated
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+function initialiseMessages( $overwrite = false, $messageArray = false ) {
+ global $wgContLang, $wgContLanguageCode;
+ global $wgContLangClass, $wgAllMessagesEn;
+ global $wgDisableLangConversion;
+ global $wgForceUIMsgAsContentMsg;
+ global $wgLanguageNames;
+ global $IP;
+
+ # overwrite language conversion option so that all variants
+ # of the messages are initialised
+ $wgDisableLangConversion = false;
+
+ if ( $messageArray ) {
+ $sortedArray = $messageArray;
+ } else {
+ $sortedArray = $wgAllMessagesEn;
+ }
+
+ ksort( $sortedArray );
+ $messages=array();
+
+ $variants = $wgContLang->getVariants();
+ if(!in_array($wgContLanguageCode, $variants))
+ $variants[]=$wgContLanguageCode;
+
+ foreach ($variants as $v) {
+ $langclass = 'Language'. str_replace( '-', '_', ucfirst( $v ) );
+ if( !class_exists($langclass) ) {
+ wfDie( "class $langclass not defined. perhaps you need to include the file $langclass.php in $wgContLangClass.php?" );
+ }
+ $lang = new $langclass;
+
+ if($v==$wgContLanguageCode)
+ $suffix='';
+ else
+ $suffix="/$v";
+ foreach ($sortedArray as $key => $msg) {
+ $messages[$key.$suffix] = $lang->getMessage($key);
+ }
+ }
+
+ require_once('languages/Names.php');
+
+ /*
+ initialize all messages in $wgForceUIMsgAsContentMsg for all
+ languages in Names.php
+ */
+ if( is_array( $wgForceUIMsgAsContentMsg ) ) {
+ foreach( $wgForceUIMsgAsContentMsg as $uikey ) {
+ foreach( $wgLanguageNames as $code => $name) {
+ if( $code == $wgContLanguageCode )
+ continue;
+ $msg = $wgContLang->getMessage( $uikey );
+ if( $msg )
+ $messages[$uikey. '/' . $code] = $msg;
+ }
+ }
+ }
+ initialiseMessagesReal( $overwrite, $messages );
+}
+
+/** */
+function initialiseMessagesReal( $overwrite = false, $messageArray = false ) {
+ global $wgContLang, $wgScript, $wgServer, $wgAllMessagesEn;
+ global $wgOut, $wgArticle, $wgUser;
+ global $wgMessageCache, $wgMemc, $wgDBname, $wgUseMemCached;
+
+ # Initialise $wgOut and $wgUser for a command line script
+ $wgOut->disable();
+
+ $wgUser = new User;
+ $wgUser->setLoaded( true ); # Don't load from DB
+ $wgUser->setName( 'MediaWiki default' );
+
+ # Don't try to draw messages from the database we're initialising
+ $wgMessageCache->disable();
+ $wgMessageCache->disableTransform();
+
+ $fname = 'initialiseMessages';
+ $ns = NS_MEDIAWIKI;
+	# cur_user_text identifies the user responsible for the modifications.
+	# Don't change it unless you're prepared to update the DBs accordingly, otherwise the
+	# default messages won't be overwritten.
+ $username = 'MediaWiki default';
+
+
+ print "Initialising \"MediaWiki\" namespace...\n";
+
+
+ $dbr =& wfGetDB( DB_SLAVE );
+ $dbw =& wfGetDB( DB_MASTER );
+ $page = $dbr->tableName( 'page' );
+ $revision = $dbr->tableName( 'revision' );
+
+ $timestamp = wfTimestampNow();
+
+ #$sql = "SELECT cur_title,cur_is_new,cur_user_text FROM $cur WHERE cur_namespace=$ns AND cur_title IN(";
+ # Get keys from $wgAllMessagesEn, which is more complete than the local language
+ $first = true;
+ if ( $messageArray ) {
+ $sortedArray = $messageArray;
+ } else {
+ $sortedArray = $wgAllMessagesEn;
+ }
+
+ ksort( $sortedArray );
+
+ # SELECT all existing messages
+ # Can't afford to be locking all rows for update, this script can take quite a long time to complete
+ $rows = array();
+ $nitems = count($sortedArray);
+ $maxitems = $dbr->maxListLen();
+ $pos = 0;
+ if ($maxitems)
+ $chunks = array_chunk($sortedArray, $maxitems);
+ else
+ $chunks = array($sortedArray);
+
+ foreach ($chunks as $chunk) {
+ $first = true;
+ $sql = "SELECT page_title,page_is_new,rev_user_text FROM $page, $revision WHERE
+ page_namespace=$ns AND rev_page=page_id AND page_title IN(";
+
+ foreach ( $chunk as $key => $enMsg ) {
+ if ( $key == '' ) {
+ continue; // Skip odd members
+ }
+ if ( $first ) {
+ $first = false;
+ } else {
+ $sql .= ',';
+ }
+ $titleObj = Title::newFromText( $wgContLang->ucfirst( $key ) );
+ $enctitle = $dbr->strencode($titleObj->getDBkey());
+ $sql .= "'$enctitle'";
+ }
+
+ $sql .= ')';
+ $res = $dbr->query( $sql );
+ while ($row = $dbr->fetchObject($res))
+ $rows[] = $row;
+ }
+
+ # Read the results into an array
+ # Decide whether or not each one needs to be overwritten
+ $existingTitles = array();
+ foreach ($rows as $row) {
+ if ( $row->rev_user_text != $username && $row->rev_user_text != 'Template namespace initialisation script' ) {
+ $existingTitles[$row->page_title] = 'keep';
+ } else {
+ $existingTitles[$row->page_title] = 'chuck';
+ }
+ }
+
+ # Insert queries are done in one multi-row insert
+ # Here's the start of it:
+ $arr = array();
+ $talk = $wgContLang->getNsText( NS_TALK );
+ $mwtalk = $wgContLang->getNsText( NS_MEDIAWIKI_TALK );
+
+ # Merge these into a single transaction for speed
+ $dbw->begin();
+
+ # Process each message
+ foreach ( $sortedArray as $key => $enMsg ) {
+ if ( $key == '' ) {
+ continue; // Skip odd members
+ }
+ # Get message text
+ if ( $messageArray ) {
+ $message = $enMsg;
+ } else {
+ $message = wfMsgNoDBForContent( $key );
+ }
+ $titleObj = Title::newFromText( $wgContLang->ucfirst( $key ), NS_MEDIAWIKI );
+ $title = $titleObj->getDBkey();
+
+ # Update messages which already exist
+ if ( array_key_exists( $title, $existingTitles ) ) {
+ if ( $existingTitles[$title] == 'chuck' || $overwrite) {
+ # Don't bother writing a new revision if we're the same
+ # as the current text!
+ $revision = Revision::newFromTitle( $titleObj );
+ if( is_null( $revision ) || $revision->getText() != $message ) {
+ $article = new Article( $titleObj );
+ $article->quickEdit( $message );
+ }
+ }
+ } else {
+ $article = new Article( $titleObj );
+ $newid = $article->insertOn( $dbw );
+ # FIXME: set restrictions
+ $revision = new Revision( array(
+ 'page' => $newid,
+ 'text' => $message,
+ 'user' => 0,
+ 'user_text' => $username,
+ 'comment' => '',
+ ) );
+ $revid = $revision->insertOn( $dbw );
+ $article->updateRevisionOn( $dbw, $revision );
+ }
+ }
+ $dbw->commit();
+
+ # Clear the relevant memcached key
+ print 'Clearing message cache...';
+ $wgMessageCache->clear();
+ print "Done.\n";
+}
+
+/** */
+function loadLanguageFile( $filename ) {
+ $contents = file_get_contents( $filename );
+ # Remove header line
+ $p = strpos( $contents, "\n" ) + 1;
+ $contents = substr( $contents, $p );
+ # Unserialize
+ return unserialize( $contents );
+}
+
+/** */
+function doUpdates() {
+ global $wgDeferredUpdateList;
+ foreach ( $wgDeferredUpdateList as $up ) { $up->doUpdate(); }
+}
+?>
diff --git a/maintenance/Makefile b/maintenance/Makefile
new file mode 100644
index 00000000..97f8b60b
--- /dev/null
+++ b/maintenance/Makefile
@@ -0,0 +1,20 @@
+.PHONY: help test test-light
+help:
+ # Run 'make test' to run the parser tests.
+ # Run 'make doc' to run the phpdoc generation.
+ # Run 'make doxydoc' (unsupported doxygen generation).
+
+test:
+ php parserTests.php
+
+test-light:
+ php parserTests.php --color=light
+
+doc:
+ php mwdocgen.php -all
+ echo 'Doc generation done. Look at ./docs/html/'
+
+doxydoc:
+ cd .. && doxygen maintenance/mwdoxygen.cfg
+ echo 'Doc generation done. Look at ./docs/html/'
+
diff --git a/maintenance/README b/maintenance/README
new file mode 100644
index 00000000..9eb69ba8
--- /dev/null
+++ b/maintenance/README
@@ -0,0 +1,85 @@
+== MediaWiki Maintenance ==
+
+The .sql scripts in this directory are not intended to be run standalone,
+although this is appropriate in some cases, e.g. manual creation of blank tables
+prior to an import.
+
+Most of the PHP scripts need to be run from the command line. Prior to doing so,
+ensure that the LocalSettings.php file in the directory above points to the
+proper installation.
+
+Certain scripts will require elevated access to the database. In order to
+provide this, first create a MySQL user with "all" permissions on the wiki
+database, and then place their username and password in an AdminSettings.php
+file in the directory above. See AdminSettings.sample for specifics on this.
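+
+A minimal sketch of such a grant (database, user, and password here are
+placeholders; adapt them to your own setup):
+
+ GRANT ALL PRIVILEGES ON wikidb.* TO 'wikiadmin'@'localhost'
+     IDENTIFIED BY 'secret';
+ FLUSH PRIVILEGES;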
+
+=== Brief explanation of files ===
+
+A lot of the files in this directory are PHP scripts used to perform various
+maintenance tasks on the wiki database, e.g. rebuilding link tables, updating
+the search indices, etc. The files in the "archives" directory are used to
+upgrade the database schema when updating the software. Some schema definitions
+for alternative (as yet unsupported) database management systems are stored
+here too.
+
+The "storage" directory contains scripts and resources useful for working with
+external storage clusters, and are not likely to be particularly useful to the
+vast majority of installations. This directory does contain the compressOld
+scripts, however, which can be useful for compacting old data.
+
+=== Maintenance scripts ===
+
+As noted above, these should be run from the command line. Not all scripts are
+listed, as some are Wikimedia-specific, and some are not applicable to most
+installations.
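+
+A typical invocation looks like this (exact options vary per script; run a
+script without arguments or check its source for usage):
+
+ $ cd maintenance
+ $ php changePassword.php --user=Example --password=newpass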
+
+ changePassword.php
+ Reset the password of a specified user
+
+ cleanupSpam.php
+ Mass-revert insertion of linkspam
+
+ deleteOldRevisions.php
+ Erase old revisions of pages from the database
+
+ dumpBackup.php
+ Backup dump script
+
+ dumpHTML.php
+ Produce an HTML dump of a wiki
+
+ importDump.php
+ XML dump importer
+
+ importImages.php
+ Imports images into the wiki
+
+ importTextFile.php
+ Imports the contents of a text file into a wiki page
+
+ nukePage.php
+ Wipe a page and all revisions from the database
+
+ reassignEdits.php
+ Reassign edits from one user to another
+
+ rebuildImages.php
+ Update image metadata records
+
+ rebuildMessages.php
+ Update the MediaWiki namespace after changing site language
+
+ rebuildtextindex.php
+ Rebuild the fulltext search indices
+
+ refreshLinks.php
+ Rebuild the link tables
+
+ removeUnusedAccounts.php
+ Remove user accounts which have made no edits
+
+ runJobs.php
+ Immediately complete all jobs in the job queue
+
+ update.php
+ Check and upgrade the database schema to the current version \ No newline at end of file
diff --git a/maintenance/addwiki.php b/maintenance/addwiki.php
new file mode 100644
index 00000000..253033a3
--- /dev/null
+++ b/maintenance/addwiki.php
@@ -0,0 +1,210 @@
+<?php
+
+$wgNoDBParam = true;
+
+require_once( "commandLine.inc" );
+require_once( "rebuildInterwiki.inc" );
+require_once( "languages/Names.php" );
+if ( count( $args ) != 3 ) {
+ wfDie( "Usage: php addwiki.php <language> <site> <dbname>\n" );
+}
+
+addWiki( $args[0], $args[1], $args[2] );
+
+# -----------------------------------------------------------------
+
+function addWiki( $lang, $site, $dbName )
+{
+ global $IP, $wgLanguageNames, $wgDefaultExternalStore;
+
+ $name = $wgLanguageNames[$lang];
+
+ $dbw =& wfGetDB( DB_WRITE );
+ $common = "/home/wikipedia/common";
+ $maintenance = "$IP/maintenance";
+
+ print "Creating database $dbName for $lang.$site\n";
+
+ # Set up the database
+ $dbw->query( "SET table_type=Innodb" );
+ $dbw->query( "CREATE DATABASE $dbName" );
+ $dbw->selectDB( $dbName );
+
+ print "Initialising tables\n";
+ dbsource( "$maintenance/tables.sql", $dbw );
+ dbsource( "$IP/extensions/OAI/update_table.sql", $dbw );
+ $dbw->query( "INSERT INTO site_stats(ss_row_id) VALUES (1)" );
+
+ # Initialise external storage
+ if ( $wgDefaultExternalStore && preg_match( '!^DB://(.*)$!', $wgDefaultExternalStore, $m ) ) {
+ print "Initialising external storage...\n";
+ require_once( 'ExternalStoreDB.php' );
+ global $wgDBuser, $wgDBpassword, $wgExternalServers;
+ $cluster = $m[1];
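+		# e.g. $wgDefaultExternalStore = 'DB://cluster1' yields $cluster = 'cluster1'
+		# (the cluster name here is only an example)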
+
+ # Hack
+ $wgExternalServers[$cluster][0]['user'] = $wgDBuser;
+ $wgExternalServers[$cluster][0]['password'] = $wgDBpassword;
+
+ $store = new ExternalStoreDB;
+ $extdb =& $store->getMaster( $cluster );
+ $extdb->query( "SET table_type=InnoDB" );
+ $extdb->query( "CREATE DATABASE $dbName" );
+ $extdb->selectDB( $dbName );
+ dbsource( "$maintenance/storage/blobs.sql", $extdb );
+ $extdb->immediateCommit();
+ }
+
+ $wgTitle = Title::newMainPage();
+ $wgArticle = new Article( $wgTitle );
+ $ucsite = ucfirst( $site );
+
+ $wgArticle->insertNewArticle( "
+==This subdomain is reserved for the creation of a $ucsite in '''[[:en:{$name}|{$name}]]''' language==
+
+If you can write in this language and want to collaborate in the creation of this encyclopedia, then '''you''' can make it.
+
+Go ahead. Translate this page and start working on your encyclopedia.
+
+For help, see '''[[m:Help:How to start a new Wikipedia|how to start a new Wikipedia]]'''.
+
+==Sister projects==
+[http://meta.wikipedia.org Meta-Wikipedia] | [http://www.wiktionary.org Wiktionary] | [http://www.wikibooks.org Wikibooks] | [http://www.wikinews.org Wikinews] | [http://www.wikiquote.org Wikiquote] | [http://www.wikisource.org Wikisource]
+
+See the [http://www.wikipedia.org Wikipedia portal] for other language Wikipedias.
+
+[[aa:]]
+[[af:]]
+[[als:]]
+[[ar:]]
+[[de:]]
+[[en:]]
+[[as:]]
+[[ast:]]
+[[ay:]]
+[[az:]]
+[[be:]]
+[[bg:]]
+[[bn:]]
+[[bo:]]
+[[bs:]]
+[[co:]]
+[[cs:]]
+[[cy:]]
+[[da:]]
+[[el:]]
+[[eo:]]
+[[es:]]
+[[et:]]
+[[eu:]]
+[[fa:]]
+[[fi:]]
+[[fr:]]
+[[fy:]]
+[[ga:]]
+[[gl:]]
+[[gn:]]
+[[gu:]]
+[[he:]]
+[[hi:]]
+[[hr:]]
+[[hy:]]
+[[ia:]]
+[[id:]]
+[[is:]]
+[[it:]]
+[[ja:]]
+[[ka:]]
+[[kk:]]
+[[km:]]
+[[kn:]]
+[[ko:]]
+[[ks:]]
+[[ku:]]
+[[ky:]]
+[[la:]]
+[[ln:]]
+[[lo:]]
+[[lt:]]
+[[lv:]]
+[[hu:]]
+[[mi:]]
+[[mk:]]
+[[ml:]]
+[[mn:]]
+[[mr:]]
+[[ms:]]
+[[mt:]]
+[[my:]]
+[[na:]]
+[[nah:]]
+[[nds:]]
+[[ne:]]
+[[nl:]]
+[[no:]]
+[[oc:]]
+[[om:]]
+[[pa:]]
+[[pl:]]
+[[ps:]]
+[[pt:]]
+[[qu:]]
+[[ro:]]
+[[ru:]]
+[[sa:]]
+[[si:]]
+[[sk:]]
+[[sl:]]
+[[sq:]]
+[[sr:]]
+[[sv:]]
+[[sw:]]
+[[ta:]]
+[[te:]]
+[[tg:]]
+[[th:]]
+[[tk:]]
+[[tl:]]
+[[tr:]]
+[[tt:]]
+[[ug:]]
+[[uk:]]
+[[ur:]]
+[[uz:]]
+[[vi:]]
+[[vo:]]
+[[xh:]]
+[[yo:]]
+[[za:]]
+[[zh:]]
+[[zu:]]
+", '', false, false );
+
+ print "Adding to dblists\n";
+
+ # Add to dblist
+ $file = fopen( "$common/all.dblist", "a" );
+ fwrite( $file, "$dbName\n" );
+ fclose( $file );
+
+ # Update the sublists
+ system("cd $common && ./refresh-dblist");
+
+ print "Constructing interwiki SQL\n";
+ # Rebuild interwiki tables
+ $sql = getRebuildInterwikiSQL();
+ $tempname = tempnam( '/tmp', 'addwiki' );
+ $file = fopen( $tempname, 'w' );
+ if ( !$file ) {
+ wfDie( "Error, unable to open temporary file $tempname\n" );
+ }
+ fwrite( $file, $sql );
+ fclose( $file );
+ print "Sourcing interwiki SQL\n";
+ dbsource( $tempname, $dbw );
+ unlink( $tempname );
+
+ print "Script ended. You now want to run sync-common-all to publish *dblist files (check them for duplicates first)\n";
+}
+?>
diff --git a/maintenance/alltrans.php b/maintenance/alltrans.php
new file mode 100644
index 00000000..2fdc4499
--- /dev/null
+++ b/maintenance/alltrans.php
@@ -0,0 +1,11 @@
+<?php
+/**
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+require_once('commandLine.inc');
+
+foreach(array_keys($wgAllMessagesEn) as $key)
+ echo "$key\n";
+?>
diff --git a/maintenance/apache-ampersand.diff b/maintenance/apache-ampersand.diff
new file mode 100644
index 00000000..f281ce15
--- /dev/null
+++ b/maintenance/apache-ampersand.diff
@@ -0,0 +1,53 @@
+--- orig/apache_1.3.26/src/modules/standard/mod_rewrite.h Wed Mar 13 13:05:34 2002
++++ apache_1.3.26/src/modules/standard/mod_rewrite.h Tue Oct 15 14:07:21 2002
+@@ -447,6 +447,7 @@
+ static char *rewrite_mapfunc_toupper(request_rec *r, char *key);
+ static char *rewrite_mapfunc_tolower(request_rec *r, char *key);
+ static char *rewrite_mapfunc_escape(request_rec *r, char *key);
++static char *rewrite_mapfunc_ampescape(request_rec *r, char *key);
+ static char *rewrite_mapfunc_unescape(request_rec *r, char *key);
+ static char *select_random_value_part(request_rec *r, char *value);
+ static void rewrite_rand_init(void);
+--- orig/apache_1.3.26/src/modules/standard/mod_rewrite.c Wed May 29 10:39:23 2002
++++ apache_1.3.26/src/modules/standard/mod_rewrite.c Tue Oct 15 14:07:49 2002
+@@ -502,6 +502,9 @@
+ else if (strcmp(a2+4, "unescape") == 0) {
+ new->func = rewrite_mapfunc_unescape;
+ }
++ else if (strcmp(a2+4, "ampescape") == 0) {
++ new->func = rewrite_mapfunc_ampescape;
++ }
+ else if (sconf->state == ENGINE_ENABLED) {
+ return ap_pstrcat(cmd->pool, "RewriteMap: internal map not found:",
+ a2+4, NULL);
+@@ -2982,6 +2985,30 @@
+
+ value = ap_escape_uri(r->pool, key);
+ return value;
++}
++
++static char *rewrite_mapfunc_ampescape(request_rec *r, char *key)
++{
++ /* We only need to escape the ampersand */
++ char *copy = ap_palloc(r->pool, 3 * strlen(key) + 3);
++ const unsigned char *s = (const unsigned char *)key;
++ unsigned char *d = (unsigned char *)copy;
++ unsigned c;
++
++ while ((c = *s)) {
++ if (c == '&') {
++ *d++ = '%';
++ *d++ = '2';
++ *d++ = '6';
++ }
++ else {
++ *d++ = c;
++ }
++ ++s;
++ }
++ *d = '\0';
++
++ return copy;
+ }
+
+ static char *rewrite_mapfunc_unescape(request_rec *r, char *key)
diff --git a/maintenance/archives/.htaccess b/maintenance/archives/.htaccess
new file mode 100644
index 00000000..3a428827
--- /dev/null
+++ b/maintenance/archives/.htaccess
@@ -0,0 +1 @@
+Deny from all
diff --git a/maintenance/archives/patch-archive-rev_id.sql b/maintenance/archives/patch-archive-rev_id.sql
new file mode 100644
index 00000000..375001b8
--- /dev/null
+++ b/maintenance/archives/patch-archive-rev_id.sql
@@ -0,0 +1,6 @@
+-- New field in archive table to preserve revision IDs across undeletion.
+-- Added 2005-03-10
+
+ALTER TABLE /*$wgDBprefix*/archive
+ ADD
+ ar_rev_id int(8) unsigned;
diff --git a/maintenance/archives/patch-archive-text_id.sql b/maintenance/archives/patch-archive-text_id.sql
new file mode 100644
index 00000000..f59715ff
--- /dev/null
+++ b/maintenance/archives/patch-archive-text_id.sql
@@ -0,0 +1,14 @@
+-- New field in archive table to preserve text source IDs across undeletion.
+--
+-- Older entries containing NULL in this field will contain text in the
+-- ar_text and ar_flags fields, and will cause the (re)creation of a new
+-- text record upon undeletion.
+--
+-- Newer ones will reference a text.old_id with this field, and the existing
+-- entries will be used as-is; only a revision record need be created.
+--
+-- Added 2005-05-01
+
+ALTER TABLE /*$wgDBprefix*/archive
+ ADD
+ ar_text_id int(8) unsigned;
diff --git a/maintenance/archives/patch-bot.sql b/maintenance/archives/patch-bot.sql
new file mode 100644
index 00000000..ce61884c
--- /dev/null
+++ b/maintenance/archives/patch-bot.sql
@@ -0,0 +1,11 @@
+-- Add field to recentchanges for easy filtering of bot entries
+-- edits by a user with 'bot' in user.user_rights should be
+-- marked 1 in rc_bot.
+
+-- Change made 2002-12-15 by Brion VIBBER <brion@pobox.com>
+-- this affects code in Article.php, User.php SpecialRecentchanges.php
+-- column also added to buildTables.inc
+
+ALTER TABLE /*$wgDBprefix*/recentchanges
+ ADD COLUMN rc_bot tinyint(3) unsigned NOT NULL default '0'
+ AFTER rc_minor;
diff --git a/maintenance/archives/patch-cache.sql b/maintenance/archives/patch-cache.sql
new file mode 100644
index 00000000..5651c3ce
--- /dev/null
+++ b/maintenance/archives/patch-cache.sql
@@ -0,0 +1,41 @@
+-- patch-cache.sql
+-- 2003-03-22 <brion@pobox.com>
+--
+-- Add 'last touched' fields to cur and user tables.
+-- These are useful for maintaining cache consistency.
+-- (Updates to OutputPage.php and elsewhere.)
+--
+-- cur_touched should be set to the current time whenever:
+-- * the page is updated
+-- * a linked page is created
+-- * a linked page is destroyed
+--
+-- The cur_touched time will then be compared against the
+-- timestamps of cached pages to ensure consistency; if
+-- cur_touched is later, the page must be regenerated.
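+-- (For instance, a copy cached at 20030322120000 would need re-rendering
+-- once cur_touched is later than that timestamp.)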
+
+ALTER TABLE /*$wgDBprefix*/cur
+ ADD COLUMN cur_touched char(14) binary NOT NULL default '';
+
+-- Existing pages should be initialized to the current
+-- time so they don't needlessly rerender until they are
+-- changed for the first time:
+
+UPDATE /*$wgDBprefix*/cur
+ SET cur_touched=NOW()+0;
+
+-- user_touched should be set to the current time whenever:
+-- * the user logs in
+-- * the user saves preferences (if no longer default...?)
+-- * the user's newtalk status is altered
+--
+-- The user_touched time should also be checked against the
+-- timestamp reported by a browser requesting revalidation.
+-- If user_touched is later than the reported last modified
+-- time, the page should be rerendered with new options and
+-- sent again.
+
+ALTER TABLE /*$wgDBprefix*/user
+ ADD COLUMN user_touched char(14) binary NOT NULL default '';
+UPDATE /*$wgDBprefix*/user
+ SET user_touched=NOW()+0;
diff --git a/maintenance/archives/patch-categorylinks.sql b/maintenance/archives/patch-categorylinks.sql
new file mode 100644
index 00000000..53c82fc0
--- /dev/null
+++ b/maintenance/archives/patch-categorylinks.sql
@@ -0,0 +1,39 @@
+--
+-- Track category inclusions *used inline*
+-- This tracks a single level of category membership
+-- (folksonomic tagging, really).
+--
+CREATE TABLE /*$wgDBprefix*/categorylinks (
+ -- Key to page_id of the page defined as a category member.
+ cl_from int(8) unsigned NOT NULL default '0',
+
+ -- Name of the category.
+ -- This is also the page_title of the category's description page;
+ -- all such pages are in namespace 14 (NS_CATEGORY).
+ cl_to varchar(255) binary NOT NULL default '',
+
+ -- The title of the linking page, or an optional override
+ -- to determine sort order. Sorting is by binary order, which
+ -- isn't always ideal, but collations seem to be an exciting
+ -- and dangerous new world in MySQL...
+ --
+ -- For MySQL 4.1+ with charset set to utf8, the sort key *index*
+ -- needs to be cut to fewer than 1024 bytes (at 3 bytes per char).
+ -- To sort properly on the shorter key, this field needs to be
+ -- equally short.
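+ -- (255 chars of cl_to plus 86 of cl_sortkey is 341 chars, i.e. 1023
+ -- bytes at 3 bytes per char, just under that limit.)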
+ cl_sortkey varchar(86) binary NOT NULL default '',
+
+ -- This isn't really used at present. Provided for an optional
+ -- sorting method by approximate addition time.
+ cl_timestamp timestamp NOT NULL,
+
+ UNIQUE KEY cl_from(cl_from,cl_to),
+
+ -- This key is trouble. It's incomplete, AND it's too big
+ -- when collation is set to UTF-8. Bleeeacch!
+ KEY cl_sortkey(cl_to,cl_sortkey),
+
+ -- Not really used?
+ KEY cl_timestamp(cl_to,cl_timestamp)
+
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-drop-user_newtalk.sql b/maintenance/archives/patch-drop-user_newtalk.sql
new file mode 100644
index 00000000..6ec84fb3
--- /dev/null
+++ b/maintenance/archives/patch-drop-user_newtalk.sql
@@ -0,0 +1,3 @@
+-- Patch for email authentication T.Gries/M.Arndt 27.11.2004
+-- Table user_newtalk is dropped, as the table watchlist is now also used for storing user_talk-page notifications
+DROP TABLE /*$wgDBprefix*/user_newtalk;
diff --git a/maintenance/archives/patch-drop_img_type.sql b/maintenance/archives/patch-drop_img_type.sql
new file mode 100644
index 00000000..e3737617
--- /dev/null
+++ b/maintenance/archives/patch-drop_img_type.sql
@@ -0,0 +1,3 @@
+-- img_type is no longer used, delete it
+
+ALTER TABLE /*$wgDBprefix*/image DROP COLUMN img_type;
diff --git a/maintenance/archives/patch-email-authentication.sql b/maintenance/archives/patch-email-authentication.sql
new file mode 100644
index 00000000..b35b10f1
--- /dev/null
+++ b/maintenance/archives/patch-email-authentication.sql
@@ -0,0 +1,3 @@
+-- Added early in 1.5 alpha development, removed 2005-04-25
+
+ALTER TABLE /*$wgDBprefix*/user DROP COLUMN user_emailauthenticationtimestamp;
diff --git a/maintenance/archives/patch-email-notification.sql b/maintenance/archives/patch-email-notification.sql
new file mode 100644
index 00000000..f9bc0440
--- /dev/null
+++ b/maintenance/archives/patch-email-notification.sql
@@ -0,0 +1,11 @@
+-- Patch for email notification on page changes T.Gries/M.Arndt 11.09.2004
+
+-- A new column 'wl_notificationtimestamp' is added to the table 'watchlist'.
+-- When a page watched by a user X is changed by someone else, an email is sent to the watching user X
+-- if and only if the field 'wl_notificationtimestamp' is '0'. The time/date of sending the mail is then stored in that field.
+-- Further page changes do not trigger new notification mails as long as user X has not re-visited that page.
+-- The field is reset to '0' when user X re-visits the page or when he or she resets all notification timestamps
+-- ("notification flags") at once by clicking the new button on his/her watchlist page.
+-- T. Gries/M. Arndt 11.09.2004 - December 2004
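+-- A sketch of that reset (user id 123 is a placeholder):
+--   UPDATE /*$wgDBprefix*/watchlist SET wl_notificationtimestamp = '0'
+--     WHERE wl_user = 123;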
+
+ALTER TABLE /*$wgDBprefix*/watchlist ADD (wl_notificationtimestamp varchar(14) binary);
diff --git a/maintenance/archives/patch-externallinks.sql b/maintenance/archives/patch-externallinks.sql
new file mode 100644
index 00000000..d1aa5764
--- /dev/null
+++ b/maintenance/archives/patch-externallinks.sql
@@ -0,0 +1,13 @@
+--
+-- Track links to external URLs
+--
+CREATE TABLE /*$wgDBprefix*/externallinks (
+ el_from int(8) unsigned NOT NULL default '0',
+ el_to blob NOT NULL default '',
+ el_index blob NOT NULL default '',
+
+ KEY (el_from, el_to(40)),
+ KEY (el_to(60), el_from),
+ KEY (el_index(60))
+) TYPE=InnoDB;
+
diff --git a/maintenance/archives/patch-filearchive.sql b/maintenance/archives/patch-filearchive.sql
new file mode 100644
index 00000000..4bf09366
--- /dev/null
+++ b/maintenance/archives/patch-filearchive.sql
@@ -0,0 +1,51 @@
+--
+-- Record of deleted file data
+--
+CREATE TABLE /*$wgDBprefix*/filearchive (
+ -- Unique row id
+ fa_id int not null auto_increment,
+
+ -- Original base filename; key to image.img_name, page.page_title, etc
+ fa_name varchar(255) binary NOT NULL default '',
+
+ -- Filename of archived file, if an old revision
+ fa_archive_name varchar(255) binary default '',
+
+ -- Which storage bin (directory tree or object store) the file data
+ -- is stored in. Should be 'deleted' for files that have been deleted;
+ -- any other bin is not yet in use.
+ fa_storage_group varchar(16),
+
+ -- SHA-1 of the file contents plus extension, used as a key for storage.
+ -- eg 8f8a562add37052a1848ff7771a2c515db94baa9.jpg
+ --
+ -- If NULL, the file was missing at deletion time or has been purged
+ -- from the archival storage.
+ fa_storage_key varchar(64) binary default '',
+
+ -- Deletion information, if this file is deleted.
+ fa_deleted_user int,
+ fa_deleted_timestamp char(14) binary default '',
+ fa_deleted_reason text,
+
+ -- Duped fields from image
+ fa_size int(8) unsigned default '0',
+ fa_width int(5) default '0',
+ fa_height int(5) default '0',
+ fa_metadata mediumblob,
+ fa_bits int(3) default '0',
+ fa_media_type ENUM("UNKNOWN", "BITMAP", "DRAWING", "AUDIO", "VIDEO", "MULTIMEDIA", "OFFICE", "TEXT", "EXECUTABLE", "ARCHIVE") default NULL,
+ fa_major_mime ENUM("unknown", "application", "audio", "image", "text", "video", "message", "model", "multipart") default "unknown",
+ fa_minor_mime varchar(32) default "unknown",
+ fa_description tinyblob default '',
+ fa_user int(5) unsigned default '0',
+ fa_user_text varchar(255) binary default '',
+ fa_timestamp char(14) binary default '',
+
+ PRIMARY KEY (fa_id),
+ INDEX (fa_name, fa_timestamp), -- pick out by image name
+ INDEX (fa_storage_group, fa_storage_key), -- pick out dupe files
+ INDEX (fa_deleted_timestamp), -- sort by deletion time
+ INDEX (fa_deleted_user) -- sort by deleter
+
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-hitcounter.sql b/maintenance/archives/patch-hitcounter.sql
new file mode 100644
index 00000000..260f717f
--- /dev/null
+++ b/maintenance/archives/patch-hitcounter.sql
@@ -0,0 +1,9 @@
+--
+-- hitcounter table is used to buffer page hits before they are periodically
+-- counted and added to the cur_counter column in the cur table.
+-- December 2003
+--
+
+CREATE TABLE /*$wgDBprefix*/hitcounter (
+ hc_id INTEGER UNSIGNED NOT NULL
+) TYPE=HEAP MAX_ROWS=25000;
diff --git a/maintenance/archives/patch-image_name_primary.sql b/maintenance/archives/patch-image_name_primary.sql
new file mode 100644
index 00000000..5bd88264
--- /dev/null
+++ b/maintenance/archives/patch-image_name_primary.sql
@@ -0,0 +1,6 @@
+-- Make the image name index unique
+
+ALTER TABLE /*$wgDBprefix*/image DROP INDEX img_name;
+
+ALTER TABLE /*$wgDBprefix*/image
+ ADD PRIMARY KEY img_name (img_name);
diff --git a/maintenance/archives/patch-image_name_unique.sql b/maintenance/archives/patch-image_name_unique.sql
new file mode 100644
index 00000000..5cf02d41
--- /dev/null
+++ b/maintenance/archives/patch-image_name_unique.sql
@@ -0,0 +1,6 @@
+-- Make the image name index unique
+
+ALTER TABLE /*$wgDBprefix*/image DROP INDEX img_name;
+
+ALTER TABLE /*$wgDBprefix*/image
+ ADD UNIQUE INDEX img_name (img_name);
diff --git a/maintenance/archives/patch-img_exif.sql b/maintenance/archives/patch-img_exif.sql
new file mode 100644
index 00000000..2fd78f76
--- /dev/null
+++ b/maintenance/archives/patch-img_exif.sql
@@ -0,0 +1,3 @@
+-- Extra image exif metadata, added for 1.5 but quickly removed.
+
+ALTER TABLE /*$wgDBprefix*/image DROP img_exif;
diff --git a/maintenance/archives/patch-img_media_type.sql b/maintenance/archives/patch-img_media_type.sql
new file mode 100644
index 00000000..2356fc63
--- /dev/null
+++ b/maintenance/archives/patch-img_media_type.sql
@@ -0,0 +1,17 @@
+-- media type columns, added for 1.5
+-- This alters the schema for 1.5; img_type is no longer used.
+
+ALTER TABLE /*$wgDBprefix*/image ADD (
+ -- Media type as defined by the MEDIATYPE_xxx constants
+ img_media_type ENUM("UNKNOWN", "BITMAP", "DRAWING", "AUDIO", "VIDEO", "MULTIMEDIA", "OFFICE", "TEXT", "EXECUTABLE", "ARCHIVE") default NULL,
+
+ -- major part of a MIME media type as defined by IANA
+ -- see http://www.iana.org/assignments/media-types/
+ img_major_mime ENUM("unknown", "application", "audio", "image", "text", "video", "message", "model", "multipart") NOT NULL default "unknown",
+
+ -- minor part of a MIME media type as defined by IANA
+ -- the minor parts are not required to adhere to any standard
+ -- but should be consistent throughout the database
+ -- see http://www.iana.org/assignments/media-types/
+ img_minor_mime varchar(32) NOT NULL default "unknown"
+);
diff --git a/maintenance/archives/patch-img_metadata.sql b/maintenance/archives/patch-img_metadata.sql
new file mode 100644
index 00000000..407e4325
--- /dev/null
+++ b/maintenance/archives/patch-img_metadata.sql
@@ -0,0 +1,6 @@
+-- Moving img_exif to img_metadata, so the name won't be so confusing when we
+-- use it for Ogg metadata or something like that.
+
+ALTER TABLE /*$wgDBprefix*/image ADD (
+ img_metadata mediumblob NOT NULL
+);
diff --git a/maintenance/archives/patch-img_width.sql b/maintenance/archives/patch-img_width.sql
new file mode 100644
index 00000000..c99bd46d
--- /dev/null
+++ b/maintenance/archives/patch-img_width.sql
@@ -0,0 +1,18 @@
+-- Extra image metadata, added for 1.5
+
+-- NOTE: as per patch-img_media_type.sql, the img_type
+-- column is no longer used and has therefore been removed from this patch
+
+ALTER TABLE /*$wgDBprefix*/image ADD (
+ img_width int(5) NOT NULL default 0,
+ img_height int(5) NOT NULL default 0,
+ img_bits int(5) NOT NULL default 0
+);
+
+ALTER TABLE /*$wgDBprefix*/oldimage ADD (
+ oi_width int(5) NOT NULL default 0,
+ oi_height int(5) NOT NULL default 0,
+ oi_bits int(3) NOT NULL default 0
+);
+
+
diff --git a/maintenance/archives/patch-indexes.sql b/maintenance/archives/patch-indexes.sql
new file mode 100644
index 00000000..23eec07d
--- /dev/null
+++ b/maintenance/archives/patch-indexes.sql
@@ -0,0 +1,24 @@
+--
+-- patch-indexes.sql
+--
+-- Fix up table indexes; new to stable release in November 2003
+--
+
+ALTER TABLE /*$wgDBprefix*/links
+ DROP INDEX l_from,
+ ADD INDEX l_from (l_from);
+
+ALTER TABLE /*$wgDBprefix*/brokenlinks
+ DROP INDEX bl_to,
+	ADD INDEX bl_to (bl_to);
+
+ALTER TABLE /*$wgDBprefix*/recentchanges
+ ADD INDEX rc_timestamp (rc_timestamp),
+ ADD INDEX rc_namespace_title (rc_namespace, rc_title),
+ ADD INDEX rc_cur_id (rc_cur_id);
+
+ALTER TABLE /*$wgDBprefix*/archive
+ ADD KEY name_title_timestamp (ar_namespace,ar_title,ar_timestamp);
+
+ALTER TABLE /*$wgDBprefix*/watchlist
+ ADD KEY namespace_title (wl_namespace,wl_title);
diff --git a/maintenance/archives/patch-interwiki-trans.sql b/maintenance/archives/patch-interwiki-trans.sql
new file mode 100644
index 00000000..2384a66a
--- /dev/null
+++ b/maintenance/archives/patch-interwiki-trans.sql
@@ -0,0 +1,2 @@
+ALTER TABLE /*$wgDBprefix*/interwiki
+ ADD COLUMN iw_trans TINYINT(1) NOT NULL DEFAULT 0;
diff --git a/maintenance/archives/patch-interwiki.sql b/maintenance/archives/patch-interwiki.sql
new file mode 100644
index 00000000..90b162ef
--- /dev/null
+++ b/maintenance/archives/patch-interwiki.sql
@@ -0,0 +1,20 @@
+-- Creates interwiki prefix<->url mapping table
+-- used from 2003-08-21 dev version.
+-- Import the default mappings from maintenance/interwiki.sql
+
+CREATE TABLE /*$wgDBprefix*/interwiki (
+ -- The interwiki prefix (e.g. "Meatball", or the language prefix "de")
+ iw_prefix char(32) NOT NULL,
+
+ -- The URL of the wiki, with "$1" as a placeholder for an article name.
+ -- Any spaces in the name will be transformed to underscores before
+ -- insertion.
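+ -- For example: "http://www.usemod.com/cgi-bin/mb.pl?$1" (an illustration;
+ -- the stock mappings are loaded from maintenance/interwiki.sql).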
+ iw_url char(127) NOT NULL,
+
+ -- A boolean value indicating whether the wiki is in this project
+ -- (used, for example, to detect redirect loops)
+ iw_local BOOL NOT NULL,
+
+ UNIQUE KEY iw_prefix (iw_prefix)
+
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-inverse_timestamp.sql b/maintenance/archives/patch-inverse_timestamp.sql
new file mode 100644
index 00000000..0f7d66f1
--- /dev/null
+++ b/maintenance/archives/patch-inverse_timestamp.sql
@@ -0,0 +1,15 @@
+-- Removes the inverse_timestamp field from early 1.5 alphas.
+-- This field was used in the olden days as a crutch for sorting
+-- limitations in MySQL 3.x, but is being dropped now as an
+-- unnecessary burden. Serious wikis should be running on 4.x.
+--
+-- Updater added 2005-03-13
+
+ALTER TABLE /*$wgDBprefix*/revision
+ DROP COLUMN inverse_timestamp,
+ DROP INDEX page_timestamp,
+ DROP INDEX user_timestamp,
+ DROP INDEX usertext_timestamp,
+ ADD INDEX page_timestamp (rev_page,rev_timestamp),
+ ADD INDEX user_timestamp (rev_user,rev_timestamp),
+ ADD INDEX usertext_timestamp (rev_user_text,rev_timestamp);
diff --git a/maintenance/archives/patch-ipb_expiry.sql b/maintenance/archives/patch-ipb_expiry.sql
new file mode 100644
index 00000000..0f106d70
--- /dev/null
+++ b/maintenance/archives/patch-ipb_expiry.sql
@@ -0,0 +1,8 @@
+-- Adds the ipb_expiry field to ipblocks
+
+ALTER TABLE /*$wgDBprefix*/ipblocks ADD ipb_expiry char(14) binary NOT NULL default '';
+
+-- All IP blocks have one day expiry
+UPDATE /*$wgDBprefix*/ipblocks SET ipb_expiry = date_format(date_add(ipb_timestamp,INTERVAL 1 DAY),"%Y%m%d%H%i%s") WHERE ipb_user = 0;
+
+-- Null string is fine for user blocks, since this indicates infinity
diff --git a/maintenance/archives/patch-ipb_range_start.sql b/maintenance/archives/patch-ipb_range_start.sql
new file mode 100644
index 00000000..c31e2d9c
--- /dev/null
+++ b/maintenance/archives/patch-ipb_range_start.sql
@@ -0,0 +1,25 @@
+-- Add the range handling fields
+ALTER TABLE /*$wgDBprefix*/ipblocks
+ ADD ipb_range_start varchar(32) NOT NULL default '',
+ ADD ipb_range_end varchar(32) NOT NULL default '',
+ ADD INDEX ipb_range (ipb_range_start(8), ipb_range_end(8));
+
+
+-- Initialise fields
+-- Only range blocks match ipb_address LIKE '%/%', this fact is used in the code already
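+-- Worked example (illustrative): for ipb_address = '1.2.3.0/24',
+--   start = (1<<24)+(2<<16)+(3<<8)+0 = 0x01020300 -> LPAD gives '01020300'
+--   end   = start + ((1<<(32-24))-1) = 0x010203FF -> '010203FF'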
+UPDATE /*$wgDBprefix*/ipblocks
+ SET
+ ipb_range_start = LPAD(HEX(
+ (SUBSTRING_INDEX(ipb_address, '.', 1) << 24)
+ + (SUBSTRING_INDEX(SUBSTRING_INDEX(ipb_address, '.', 2), '.', -1) << 16)
+	  + (SUBSTRING_INDEX(SUBSTRING_INDEX(ipb_address, '.', 3), '.', -1) << 8)
+ + (SUBSTRING_INDEX(SUBSTRING_INDEX(ipb_address, '/', 1), '.', -1)) ), 8, '0' ),
+
+ ipb_range_end = LPAD(HEX(
+ (SUBSTRING_INDEX(ipb_address, '.', 1) << 24)
+ + (SUBSTRING_INDEX(SUBSTRING_INDEX(ipb_address, '.', 2), '.', -1) << 16)
+	  + (SUBSTRING_INDEX(SUBSTRING_INDEX(ipb_address, '.', 3), '.', -1) << 8)
+ + (SUBSTRING_INDEX(SUBSTRING_INDEX(ipb_address, '/', 1), '.', -1))
+ + ((1 << (32 - SUBSTRING_INDEX(ipb_address, '/', -1))) - 1) ), 8, '0' )
+
+ WHERE ipb_address LIKE '%/%';
diff --git a/maintenance/archives/patch-ipblocks.sql b/maintenance/archives/patch-ipblocks.sql
new file mode 100644
index 00000000..8e47798b
--- /dev/null
+++ b/maintenance/archives/patch-ipblocks.sql
@@ -0,0 +1,6 @@
+-- For auto-expiring blocks --
+
+ALTER TABLE /*$wgDBprefix*/ipblocks
+ ADD ipb_auto tinyint(1) NOT NULL default '0',
+ ADD ipb_id int(8) NOT NULL auto_increment,
+ ADD PRIMARY KEY (ipb_id);
diff --git a/maintenance/archives/patch-job.sql b/maintenance/archives/patch-job.sql
new file mode 100644
index 00000000..89918456
--- /dev/null
+++ b/maintenance/archives/patch-job.sql
@@ -0,0 +1,20 @@
+
+-- Jobs performed by parallel apache threads or a command-line daemon
+CREATE TABLE /*$wgDBprefix*/job (
+ job_id int(9) unsigned NOT NULL auto_increment,
+
+ -- Command name, currently only refreshLinks is defined
+ job_cmd varchar(255) NOT NULL default '',
+
+ -- Namespace and title to act on
+ -- Should be 0 and '' if the command does not operate on a title
+ job_namespace int NOT NULL,
+ job_title varchar(255) binary NOT NULL,
+
+ -- Any other parameters to the command
+ -- Presently unused, format undefined
+ job_params blob NOT NULL default '',
+
+ PRIMARY KEY job_id (job_id),
+ KEY (job_cmd, job_namespace, job_title)
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-langlinks.sql b/maintenance/archives/patch-langlinks.sql
new file mode 100644
index 00000000..9c3b7e54
--- /dev/null
+++ b/maintenance/archives/patch-langlinks.sql
@@ -0,0 +1,14 @@
+CREATE TABLE /*$wgDBprefix*/langlinks (
+ -- page_id of the referring page
+ ll_from int(8) unsigned NOT NULL default '0',
+
+ -- Language code of the target
+ ll_lang varchar(10) binary NOT NULL default '',
+
+ -- Title of the target, including namespace
+ ll_title varchar(255) binary NOT NULL default '',
+
+ UNIQUE KEY (ll_from, ll_lang),
+ KEY (ll_lang, ll_title)
+) TYPE=InnoDB;
+
diff --git a/maintenance/archives/patch-linkscc-1.3.sql b/maintenance/archives/patch-linkscc-1.3.sql
new file mode 100644
index 00000000..e397fcb9
--- /dev/null
+++ b/maintenance/archives/patch-linkscc-1.3.sql
@@ -0,0 +1,6 @@
+--
+-- linkscc table used to cache link lists in easier to digest form.
+-- New schema for 1.3 - removes old lcc_title column.
+-- May 2004
+--
+ALTER TABLE /*$wgDBprefix*/linkscc DROP COLUMN lcc_title; \ No newline at end of file
diff --git a/maintenance/archives/patch-linkscc.sql b/maintenance/archives/patch-linkscc.sql
new file mode 100644
index 00000000..91d4da56
--- /dev/null
+++ b/maintenance/archives/patch-linkscc.sql
@@ -0,0 +1,12 @@
+--
+-- linkscc table used to cache link lists in easier to digest form
+-- November 2003
+--
+-- Format later updated.
+--
+
+CREATE TABLE /*$wgDBprefix*/linkscc (
+ lcc_pageid INT UNSIGNED NOT NULL UNIQUE KEY,
+ lcc_cacheobj MEDIUMBLOB NOT NULL
+
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-linktables.sql b/maintenance/archives/patch-linktables.sql
new file mode 100644
index 00000000..bb9bd033
--- /dev/null
+++ b/maintenance/archives/patch-linktables.sql
@@ -0,0 +1,70 @@
+--
+-- Track links that do exist
+-- l_from and l_to key to cur_id
+--
+DROP TABLE IF EXISTS /*$wgDBprefix*/links;
+CREATE TABLE /*$wgDBprefix*/links (
+ -- Key to the page_id of the page containing the link.
+ l_from int(8) unsigned NOT NULL default '0',
+
+ -- Key to the page_id of the link target.
+ -- An unfortunate consequence of this is that rename
+ -- operations require changing the links entries for
+ -- all links to the moved page.
+ l_to int(8) unsigned NOT NULL default '0',
+
+ UNIQUE KEY l_from(l_from,l_to),
+ KEY (l_to)
+
+) TYPE=InnoDB;
+
+--
+-- Track links to pages that don't yet exist.
+-- bl_from keys to cur_id
+-- bl_to is a text link (namespace:title)
+--
+DROP TABLE IF EXISTS /*$wgDBprefix*/brokenlinks;
+CREATE TABLE /*$wgDBprefix*/brokenlinks (
+ -- Key to the page_id of the page containing the link.
+ bl_from int(8) unsigned NOT NULL default '0',
+
+ -- Text of the target page title ("namespace:title").
+ -- Unfortunately this doesn't split the namespace index
+ -- key and therefore can't easily be joined to anything.
+ bl_to varchar(255) binary NOT NULL default '',
+ UNIQUE KEY bl_from(bl_from,bl_to),
+ KEY (bl_to)
+
+) TYPE=InnoDB;
+
+--
+-- Track links to images *used inline*
+-- il_from keys to cur_id, il_to keys to image_name.
+-- We don't distinguish live from broken links.
+--
+DROP TABLE IF EXISTS /*$wgDBprefix*/imagelinks;
+CREATE TABLE /*$wgDBprefix*/imagelinks (
+ -- Key to page_id of the page containing the image / media link.
+ il_from int(8) unsigned NOT NULL default '0',
+
+ -- Filename of target image.
+ -- This is also the page_title of the file's description page;
+ -- all such pages are in namespace 6 (NS_IMAGE).
+ il_to varchar(255) binary NOT NULL default '',
+
+ UNIQUE KEY il_from(il_from,il_to),
+ KEY (il_to)
+
+) TYPE=InnoDB;
+
+--
+-- Stores (possibly gzipped) serialized objects with
+-- cache arrays to reduce database load slurping up
+-- from links and brokenlinks.
+--
+DROP TABLE IF EXISTS /*$wgDBprefix*/linkscc;
+CREATE TABLE /*$wgDBprefix*/linkscc (
+ lcc_pageid INT UNSIGNED NOT NULL UNIQUE KEY,
+ lcc_cacheobj MEDIUMBLOB NOT NULL
+
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-list.txt b/maintenance/archives/patch-list.txt
new file mode 100644
index 00000000..93a63bfd
--- /dev/null
+++ b/maintenance/archives/patch-list.txt
@@ -0,0 +1,182 @@
+List of database patches and upgrades as the MediaWiki software evolves...
+
+* 2002-11-23: Search index format changed for UTF-8 wikis
+For wikis using the UTF-8 languages, the search index entries
+need to be rebuilt to allow searching to work. (Other wikis
+that have been run through the old phase2->phase3 conversion
+script should also be reindexed to catch apostrophe misplacement.)
+
+Run rebuildIndex.php on your wiki.
+
+
+
+* 2002-11-27: Watchlist format changed
+Converts the user_watchlist entries out to a separate table which
+links user_id<->cur_id and can be more handily queried.
+
+Run upgradeWatchlist.php on your wiki.
+
+
+
+* 2002-12-14: Recentchanges table bot/hidden column
+Adds a column to indicate changes by registered bots (or perhaps
+later other admin actions) that should be hidden from the default
+Recentchanges list because people think they're tedious, but should
+still be available in article histories, contribs lists, and
+power-user RC lists.
+
+Run bot.sql against your database.
+
+
+
+* 2002-12-17: Watchlist format changed again
+Now using namespace, title instead of cur_id. This can track deleted/
+recreated pages better, makes it easier to handle talk pages (now with
+the auto-watch feature there's a lot more watching of talk pages!)
+and whatnot.
+
+Run patch-watchlist.sql against your database. If all is well, drop
+the oldwatchlist table which is no longer needed. (Note that this update
+also drops the vestigial user_watchlist column.)
+
+
+
+* 2002-12-26: TeX math rendering adds 'math' table
+A new 'math' table is used to cache TeX sections.
+
+Run patch-math.sql against your database, add 'tmp' and 'math'
+subdirectories to your tree alongside the upload directory, then copy
+the 'math' source subdirectory under the wiki's PHP directory and run
+"make" to compile the texvc evaluator. (whew!)
+
+TeX support requires TeX, OCaml, and ImageMagick. If you don't want
+to use TeX support on your wiki, you can globally disable it by
+setting $wgUseTeX=false in LocalSettings.php.
+
+
+
+* 2003-01-25: searchindex table
+A new 'searchindex' table separates the fulltext index fields from
+'cur'. This enables use of InnoDB tables, which don't support fulltext
+search, for the main data, and will keep junk out of the backup dumps.
+
+Run patch-searchindex.sql on the database. If you wish to change table
+types on the others, use 'alter table' manually. (See MySQL docs.)
+
+
+* 2003-01-24: Talk pages for anonymous users
+A new table user_newtalk contains a list of talk pages that were
+changed, covering both anonymous and registered users.
+
+Run patch-usernewtalk.sql if your database was created before
+this date.
+
+
+* 2003-02-02: Math table changed
+Rerun patch-math.sql to recreate it.
+
+* 2003-02-03: Index added to USER table for performance reasons. Run
+patch-userindex.sql to create it.
+
+
+* 2003-02-09: Random table & inverse timestamps
+The random page queue table has been removed in favor of a column
+in the cur table. This eliminates the ssllooww queue refill step;
+pre-storing random indices in an indexed column means we can do the
+random sort instantly; each element is re-randomized upon selection.
+
+Also, an inverse_timestamp field has been added to the cur and old
+tables. This will allow fast index-based sorting in history lists,
+user contribs, linked recentchanges, etc with MySQL 3, which doesn't
+allow DESC ordering on an indexed field. This may be removed later
+when MySQL is found to be stable.
+
+
+* 2003-03-22: Last touched fields for caching
+'Last touched' timestamp fields have been added to the cur and user
+tables to aid in maintaining cache consistency. Web clients will
+be forced to reload a page if it has been touched since the client's
+cached copy (this will catch indirect changes like creation of
+linked pages) or if a user changes preferences or logs in anew (so
+visual changes and login status are taken into account).
+
+Run patch-cache.sql on the database to set these fields up. This is
+required for changes to OutputPage.php and elsewhere to continue
+working on an older database.
+
+
+* 2003-05-23: Index for "Oldest articles"
+"Oldest articles" needs an index on namespace, redirect and timestamp
+to be reasonably fast. (patch-oldestindex.sql)
+
+Affected files: OutputPage.php, User.php, maintenance/buildTables.inc, maintenance/patch-cache.sql, maintenance/patch-list.txt
+
+* 2003-05-30: File upload license fields
+Adds fields to 'image' table.
+INCOMPLETE, DO NOT USE
+
+
+* 2003-08-21: Interwiki URL table
+Moves the interwiki prefix<->url mapping table from a static array
+into the database. If you've got a custom table, be sure to make
+your changes!
+
+Run patch-interwiki.sql to create the interwiki table, then the
+plain interwiki.sql to load up the default set of mappings.
+
+* 2003-09: Ipblocks auto-expiry update
+patch-ipblocks.sql
+
+
+* 2003-11: Indexes
+Fixes up indexes on links, brokenlinks, recentchanges, watchlist,
+and archive tables to boost speed.
+
+Run patch-indexes.sql.
+
+* 2003-11: linkscc table creation
+patch-linkscc.sql
+
+
+* 2004-01-25: recentchanges additional index
+Adds an index to recentchanges to optimize Special:Newpages
+patch-rc-newindex.sql
+
+* 2004-02-14: Adds the ipb_expiry field to ipblocks
+patch-ipb_expiry.sql
+
+
+* 2004-03-11: Recreate links tables to avoid duplicating titles
+everywhere. **Rebuild your links after this with refreshLinks.php**
+
+patch-linktables.sql
+
+
+* 2004-04: Add user_real_name field
+patch-user-realname.sql
+
+* 2004-05-08: Add querycache table for caching special pages and generic
+ object cache to cover some slow operations w/o memcached.
+patch-querycache.sql
+patch-objectcache.sql
+
+* 2004-05-14: Add categorylinks table for handling category membership
+patch-categorylinks.sql
diff --git a/maintenance/archives/patch-log_params.sql b/maintenance/archives/patch-log_params.sql
new file mode 100644
index 00000000..aa00a673
--- /dev/null
+++ b/maintenance/archives/patch-log_params.sql
@@ -0,0 +1 @@
+ALTER TABLE /*$wgDBprefix*/logging ADD log_params blob NOT NULL default '';
diff --git a/maintenance/archives/patch-logging-times-index.sql b/maintenance/archives/patch-logging-times-index.sql
new file mode 100644
index 00000000..e66ceec4
--- /dev/null
+++ b/maintenance/archives/patch-logging-times-index.sql
@@ -0,0 +1,9 @@
+--
+-- patch-logging-times-index.sql
+--
+-- Add a very humble index on logging times
+--
+
+ALTER TABLE /*$wgDBprefix*/logging
+ ADD INDEX times (log_timestamp);
+
diff --git a/maintenance/archives/patch-logging-title.sql b/maintenance/archives/patch-logging-title.sql
new file mode 100644
index 00000000..c5da0dc0
--- /dev/null
+++ b/maintenance/archives/patch-logging-title.sql
@@ -0,0 +1,6 @@
+-- 1.4 betas were missing the 'binary' marker from logging.log_title,
+-- which causes a collation mismatch error on joins in MySQL 4.1.
+
+ALTER TABLE /*$wgDBprefix*/logging
+ CHANGE COLUMN log_title
+ log_title varchar(255) binary NOT NULL default '';
diff --git a/maintenance/archives/patch-logging.sql b/maintenance/archives/patch-logging.sql
new file mode 100644
index 00000000..79bb53b5
--- /dev/null
+++ b/maintenance/archives/patch-logging.sql
@@ -0,0 +1,37 @@
+-- Add the logging table and adjust recentchanges to accommodate special pages
+-- 2004-08-24
+
+CREATE TABLE /*$wgDBprefix*/logging (
+ -- Symbolic keys for the general log type and the action type
+ -- within the log. The output format will be controlled by the
+ -- action field, but only the type controls categorization.
+ log_type char(10) NOT NULL default '',
+ log_action char(10) NOT NULL default '',
+
+ -- Timestamp. Duh.
+ log_timestamp char(14) NOT NULL default '19700101000000',
+
+ -- The user who performed this action; key to user_id
+ log_user int unsigned NOT NULL default 0,
+
+ -- Key to the page affected. Where a user is the target,
+ -- this will point to the user page.
+ log_namespace int NOT NULL default 0,
+ log_title varchar(255) binary NOT NULL default '',
+
+ -- Freeform text. Interpreted as edit history comments.
+ log_comment varchar(255) NOT NULL default '',
+
+ -- LF separated list of miscellaneous parameters
+ log_params blob NOT NULL default '',
+
+ KEY type_time (log_type, log_timestamp),
+ KEY user_time (log_user, log_timestamp),
+ KEY page_time (log_namespace, log_title, log_timestamp)
+
+) TYPE=InnoDB;
+
+
+-- Change from unsigned to signed so we can store special pages
+ALTER TABLE recentchanges
+ MODIFY rc_namespace tinyint(3) NOT NULL default '0';
diff --git a/maintenance/archives/patch-math.sql b/maintenance/archives/patch-math.sql
new file mode 100644
index 00000000..aee24a8a
--- /dev/null
+++ b/maintenance/archives/patch-math.sql
@@ -0,0 +1,28 @@
+-- Creates table math used for caching TeX blocks. Needs to be run
+-- on old installations when adding TeX support (2002-12-26)
+-- Or, TeX can be disabled via $wgUseTeX=false in LocalSettings.php
+
+-- Note: math table has changed, and this script needs to be run again
+-- to create it. (2003-02-02)
+
+DROP TABLE IF EXISTS /*$wgDBprefix*/math;
+CREATE TABLE /*$wgDBprefix*/math (
+ -- Binary MD5 hash of the latex fragment, used as an identifier key.
+ math_inputhash varchar(16) NOT NULL,
+
+ -- Not sure what this is, exactly...
+ math_outputhash varchar(16) NOT NULL,
+
+ -- texvc reports how well it thinks the HTML conversion worked;
+ -- if it's a low level the PNG rendering may be preferred.
+ math_html_conservativeness tinyint(1) NOT NULL,
+
+ -- HTML output from texvc, if any
+ math_html text,
+
+ -- MathML output from texvc, if any
+ math_mathml text,
+
+ UNIQUE KEY math_inputhash (math_inputhash)
+
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-mimesearch-indexes.sql b/maintenance/archives/patch-mimesearch-indexes.sql
new file mode 100644
index 00000000..bd348c46
--- /dev/null
+++ b/maintenance/archives/patch-mimesearch-indexes.sql
@@ -0,0 +1,22 @@
+-- Add indexes to the mime types in image for use on Special:MIMEsearch,
+-- changes a query like
+--
+-- SELECT img_name FROM image WHERE img_major_mime = "image" AND img_minor_mime = "svg";
+-- from:
+-- +-------+------+---------------+------+---------+------+------+-------------+
+-- | table | type | possible_keys | key | key_len | ref | rows | Extra |
+-- +-------+------+---------------+------+---------+------+------+-------------+
+-- | image | ALL | NULL | NULL | NULL | NULL | 194 | Using where |
+-- +-------+------+---------------+------+---------+------+------+-------------+
+-- to:
+-- +-------+------+-------------------------------+----------------+---------+-------+------+-------------+
+-- | table | type | possible_keys | key | key_len | ref | rows | Extra |
+-- +-------+------+-------------------------------+----------------+---------+-------+------+-------------+
+-- | image | ref | img_major_mime,img_minor_mime | img_minor_mime | 32 | const | 4 | Using where |
+-- +-------+------+-------------------------------+----------------+---------+-------+------+-------------+
+
+ALTER TABLE /*$wgDBprefix*/image
+ ADD INDEX img_major_mime (img_major_mime);
+ALTER TABLE /*$wgDBprefix*/image
+ ADD INDEX img_minor_mime (img_minor_mime);
+
diff --git a/maintenance/archives/patch-objectcache.sql b/maintenance/archives/patch-objectcache.sql
new file mode 100644
index 00000000..18572aa0
--- /dev/null
+++ b/maintenance/archives/patch-objectcache.sql
@@ -0,0 +1,9 @@
+-- For a few generic cache operations if not using Memcached
+CREATE TABLE /*$wgDBprefix*/objectcache (
+ keyname char(255) binary not null default '',
+ value mediumblob,
+ exptime datetime,
+ unique key (keyname),
+ key (exptime)
+
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-oldestindex.sql b/maintenance/archives/patch-oldestindex.sql
new file mode 100644
index 00000000..930214fd
--- /dev/null
+++ b/maintenance/archives/patch-oldestindex.sql
@@ -0,0 +1,5 @@
+-- Add index for "Oldest articles" (Special:Ancientpages)
+-- 2003-05-23 Erik Moeller <moeller@scireview.de>
+
+ALTER TABLE /*$wgDBprefix*/cur
+ ADD INDEX namespace_redirect_timestamp(cur_namespace,cur_is_redirect,cur_timestamp);
diff --git a/maintenance/archives/patch-page_len.sql b/maintenance/archives/patch-page_len.sql
new file mode 100644
index 00000000..c32dc8d4
--- /dev/null
+++ b/maintenance/archives/patch-page_len.sql
@@ -0,0 +1,16 @@
+-- Page length field (in bytes) for current revision of page.
+-- Since page text is now stored separately, it may be compressed
+-- or otherwise difficult to calculate. Additionally, the field
+-- can be indexed for handy 'long' and 'short' page lists.
+--
+-- Added 2005-03-12
+
+ALTER TABLE /*$wgDBprefix*/page
+ ADD page_len int(8) unsigned NOT NULL,
+ ADD INDEX (page_len);
+
+-- Not accurate if upgrading from intermediate
+-- 1.5 alpha and have revision compression on.
+UPDATE /*$wgDBprefix*/page, /*$wgDBprefix*/text
+ SET page_len=LENGTH(old_text)
+ WHERE page_latest=old_id;
diff --git a/maintenance/archives/patch-pagelinks.sql b/maintenance/archives/patch-pagelinks.sql
new file mode 100644
index 00000000..7240cff9
--- /dev/null
+++ b/maintenance/archives/patch-pagelinks.sql
@@ -0,0 +1,56 @@
+--
+-- Create the new pagelinks table to merge links and brokenlinks data,
+-- and populate it.
+--
+-- Unlike the old links and brokenlinks, these records will not need to be
+-- altered when target pages are created, deleted, or renamed. This should
+-- reduce the amount of severe database frustration that happens when widely-
+-- linked pages are altered.
+--
+-- Fixups for brokenlinks to pages in namespaces need to be run after this;
+-- this is done by updaters.inc if run through the regular update scripts.
+--
+-- 2005-05-26
+--
+
+--
+-- Track page-to-page hyperlinks within the wiki.
+--
+CREATE TABLE /*$wgDBprefix*/pagelinks (
+ -- Key to the page_id of the page containing the link.
+ pl_from int(8) unsigned NOT NULL default '0',
+
+ -- Key to page_namespace/page_title of the target page.
+ -- The target page may or may not exist, and due to renames
+ -- and deletions may refer to different page records as time
+ -- goes by.
+ pl_namespace int NOT NULL default '0',
+ pl_title varchar(255) binary NOT NULL default '',
+
+ UNIQUE KEY pl_from(pl_from,pl_namespace,pl_title),
+ KEY (pl_namespace,pl_title)
+
+) TYPE=InnoDB;
+
+
+-- Import existing-page links
+INSERT
+ INTO /*$wgDBprefix*/pagelinks (pl_from,pl_namespace,pl_title)
+ SELECT l_from,page_namespace,page_title
+ FROM /*$wgDBprefix*/links, /*$wgDBprefix*/page
+ WHERE l_to=page_id;
+
+-- Import broken links
+-- NOTE: We'll have to fix up individual entries that aren't in the main namespace
+INSERT INTO /*$wgDBprefix*/pagelinks (pl_from,pl_namespace,pl_title)
+ SELECT bl_from, 0, bl_to
+ FROM /*$wgDBprefix*/brokenlinks;
+
+-- For each namespace do something like:
+--
+-- UPDATE /*$wgDBprefix*/pagelinks
+-- SET pl_namespace=$ns,
+-- pl_title=TRIM(LEADING '$prefix:' FROM pl_title)
+-- WHERE pl_namespace=0
+-- AND pl_title LIKE '$likeprefix:%'
+--
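+-- For example, a Talk namespace fixup ($ns=1; the 'Talk' prefix is a
+-- stand-in, since localized prefixes differ per wiki):
+--
+-- UPDATE /*$wgDBprefix*/pagelinks
+-- SET pl_namespace=1,
+-- pl_title=TRIM(LEADING 'Talk:' FROM pl_title)
+-- WHERE pl_namespace=0
+-- AND pl_title LIKE 'Talk:%';
+--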
diff --git a/maintenance/archives/patch-parsercache.sql b/maintenance/archives/patch-parsercache.sql
new file mode 100644
index 00000000..854e6c57
--- /dev/null
+++ b/maintenance/archives/patch-parsercache.sql
@@ -0,0 +1,15 @@
+--
+-- parsercache table, for caching complete parsed articles
+-- before they are embedded in the skin.
+--
+
+CREATE TABLE /*$wgDBprefix*/parsercache (
+ pc_pageid INT(11) NOT NULL,
+ pc_title VARCHAR(255) NOT NULL,
+ pc_prefhash CHAR(32) NOT NULL,
+ pc_expire DATETIME NOT NULL,
+ pc_data MEDIUMBLOB NOT NULL,
+ PRIMARY KEY (pc_pageid, pc_prefhash),
+ KEY(pc_title),
+ KEY(pc_expire)
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-profiling.sql b/maintenance/archives/patch-profiling.sql
new file mode 100644
index 00000000..49b488e9
--- /dev/null
+++ b/maintenance/archives/patch-profiling.sql
@@ -0,0 +1,10 @@
+-- profiling table
+-- This is optional
+
+CREATE TABLE /*$wgDBprefix*/profiling (
+ pf_count integer not null default 0,
+ pf_time float not null default 0,
+ pf_name varchar(255) not null default '',
+ pf_server varchar(30) not null default '',
+ UNIQUE KEY pf_name_server (pf_name, pf_server)
+) TYPE=HEAP;
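+
+-- TYPE=HEAP keeps the table in memory, so its contents are lost on a
+-- server restart; that's acceptable for profiling data. A typical
+-- readout, as a sketch:
+--
+-- SELECT pf_name, pf_count, pf_time / pf_count AS avg_time
+--   FROM /*$wgDBprefix*/profiling ORDER BY pf_time DESC LIMIT 20;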
diff --git a/maintenance/archives/patch-querycache.sql b/maintenance/archives/patch-querycache.sql
new file mode 100644
index 00000000..7df9129e
--- /dev/null
+++ b/maintenance/archives/patch-querycache.sql
@@ -0,0 +1,16 @@
+-- Used for caching expensive grouped queries
+
+CREATE TABLE /*$wgDBprefix*/querycache (
+ -- A key name, generally the base name of the special page.
+ qc_type char(32) NOT NULL,
+
+ -- Some sort of stored value. Sizes, counts...
+ qc_value int(5) unsigned NOT NULL default '0',
+
+ -- Target namespace+title
+ qc_namespace int NOT NULL default '0',
+ qc_title char(255) binary NOT NULL default '',
+
+ KEY (qc_type,qc_value)
+
+) TYPE=InnoDB;
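+
+-- Reading a cached list back out (a sketch; 'Ancientpages' stands in
+-- for whichever special page populated the cache):
+--
+-- SELECT qc_namespace, qc_title, qc_value
+--   FROM /*$wgDBprefix*/querycache
+--   WHERE qc_type = 'Ancientpages' ORDER BY qc_value;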
diff --git a/maintenance/archives/patch-querycacheinfo.sql b/maintenance/archives/patch-querycacheinfo.sql
new file mode 100644
index 00000000..0e34b3a5
--- /dev/null
+++ b/maintenance/archives/patch-querycacheinfo.sql
@@ -0,0 +1,12 @@
+CREATE TABLE /*$wgDBprefix*/querycache_info (
+
+ -- Special page name
+ -- Corresponds to a qc_type value
+ qci_type varchar(32) NOT NULL default '',
+
+ -- Timestamp of last update
+ qci_timestamp char(14) NOT NULL default '19700101000000',
+
+ UNIQUE KEY ( qci_type )
+
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-random-dateindex.sql b/maintenance/archives/patch-random-dateindex.sql
new file mode 100644
index 00000000..5d514cc3
--- /dev/null
+++ b/maintenance/archives/patch-random-dateindex.sql
@@ -0,0 +1,54 @@
+-- patch-random-dateindex.sql
+-- 2003-02-09
+--
+-- This patch does two things:
+-- * Adds cur_random column to replace random table
+-- (Requires change to SpecialRandom.php)
+-- random table no longer needs refilling
+-- Note: short-term duplicate results *are* possible, but very unlikely on a large wiki
+--
+-- * Adds inverse_timestamp columns to cur and old and indexes
+-- to allow descending timestamp sort in history, contribs, etc
+-- (Requires changes to Article.php, DatabaseFunctions.php,
+-- ... )
+-- cur_timestamp inverse_timestamp
+-- 99999999999999 - 20030209222556 = 79969790777443
+-- 99999999999999 - 20030211083412 = 79969788916587
+--
+-- We won't need this on MySQL 4; there will be a removal patch later.
+
+-- Indexes:
+-- cur needs (cur_random) for random sort
+-- cur and old need (namespace,title,timestamp) index for history,watchlist,rclinked
+-- cur and old need (user,timestamp) index for contribs
+-- cur and old need (user_text,timestamp) index for contribs
+
+ALTER TABLE /*$wgDBprefix*/cur
+ DROP INDEX cur_user,
+ DROP INDEX cur_user_text,
+ ADD COLUMN cur_random real unsigned NOT NULL,
+ ADD COLUMN inverse_timestamp char(14) binary NOT NULL default '',
+ ADD INDEX (cur_random),
+ ADD INDEX name_title_timestamp (cur_namespace,cur_title,inverse_timestamp),
+ ADD INDEX user_timestamp (cur_user,inverse_timestamp),
+ ADD INDEX usertext_timestamp (cur_user_text,inverse_timestamp);
+
+UPDATE /*$wgDBprefix*/cur SET
+ inverse_timestamp=99999999999999-cur_timestamp,
+ cur_random=RAND();
+
+ALTER TABLE /*$wgDBprefix*/old
+ DROP INDEX old_user,
+ DROP INDEX old_user_text,
+ ADD COLUMN inverse_timestamp char(14) binary NOT NULL default '',
+ ADD INDEX name_title_timestamp (old_namespace,old_title,inverse_timestamp),
+ ADD INDEX user_timestamp (old_user,inverse_timestamp),
+ ADD INDEX usertext_timestamp (old_user_text,inverse_timestamp);
+
+UPDATE /*$wgDBprefix*/old SET
+ inverse_timestamp=99999999999999-old_timestamp;
+
+-- If leaving the wiki publicly accessible in read-only mode during
+-- the upgrade, comment out the line below and leave the 'random' table
+-- in place until the new software is installed.
+DROP TABLE /*$wgDBprefix*/random;
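+
+-- With cur_random populated, picking a random page becomes a cheap
+-- range scan on the cur_random index. A sketch (the PHP code computes
+-- the random threshold itself rather than using a SQL variable):
+--
+-- SET @r = RAND();
+-- SELECT cur_id, cur_title FROM /*$wgDBprefix*/cur
+--   WHERE cur_namespace = 0 AND cur_is_redirect = 0 AND cur_random >= @r
+--   ORDER BY cur_random LIMIT 1;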
diff --git a/maintenance/archives/patch-rc-newindex.sql b/maintenance/archives/patch-rc-newindex.sql
new file mode 100644
index 00000000..2315ff37
--- /dev/null
+++ b/maintenance/archives/patch-rc-newindex.sql
@@ -0,0 +1,9 @@
+--
+-- patch-rc-newindex.sql
+-- Adds an index to recentchanges to optimize Special:Newpages
+-- 2004-01-25
+--
+
+ALTER TABLE /*$wgDBprefix*/recentchanges
+ ADD INDEX new_name_timestamp(rc_new,rc_namespace,rc_timestamp);
+
diff --git a/maintenance/archives/patch-rc-patrol.sql b/maintenance/archives/patch-rc-patrol.sql
new file mode 100644
index 00000000..1839c1ee
--- /dev/null
+++ b/maintenance/archives/patch-rc-patrol.sql
@@ -0,0 +1,9 @@
+--
+-- patch-rc-patrol.sql
+-- Adds a column to recentchanges for the patrolling feature
+-- 2004-08-09
+--
+
+ALTER TABLE /*$wgDBprefix*/recentchanges
+ ADD COLUMN rc_patrolled tinyint(3) unsigned NOT NULL default '0';
+
diff --git a/maintenance/archives/patch-rc_id.sql b/maintenance/archives/patch-rc_id.sql
new file mode 100644
index 00000000..6dd9ef4a
--- /dev/null
+++ b/maintenance/archives/patch-rc_id.sql
@@ -0,0 +1,7 @@
+-- Primary key in recentchanges
+
+ALTER TABLE /*$wgDBprefix*/recentchanges
+ ADD rc_id int(8) NOT NULL auto_increment,
+ ADD PRIMARY KEY rc_id (rc_id);
+
+
diff --git a/maintenance/archives/patch-rc_ip.sql b/maintenance/archives/patch-rc_ip.sql
new file mode 100644
index 00000000..a68a22cb
--- /dev/null
+++ b/maintenance/archives/patch-rc_ip.sql
@@ -0,0 +1,7 @@
+-- Adding the rc_ip field for logging of IP addresses in recentchanges
+
+ALTER TABLE /*$wgDBprefix*/recentchanges
+ ADD rc_ip char(15) NOT NULL default '',
+ ADD INDEX rc_ip (rc_ip);
+
+
diff --git a/maintenance/archives/patch-rc_type.sql b/maintenance/archives/patch-rc_type.sql
new file mode 100644
index 00000000..1097771b
--- /dev/null
+++ b/maintenance/archives/patch-rc_type.sql
@@ -0,0 +1,9 @@
+-- recentchanges improvements --
+
+ALTER TABLE /*$wgDBprefix*/recentchanges
+ ADD rc_type tinyint(3) unsigned NOT NULL default '0',
+ ADD rc_moved_to_ns tinyint(3) unsigned NOT NULL default '0',
+ ADD rc_moved_to_title varchar(255) binary NOT NULL default '';
+
+UPDATE /*$wgDBprefix*/recentchanges SET rc_type=1 WHERE rc_new;
+UPDATE /*$wgDBprefix*/recentchanges SET rc_type=3 WHERE rc_namespace=4 AND (rc_title='Deletion_log' OR rc_title='Upload_log');
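+
+-- The values correspond to the RC_* constants in the PHP source:
+-- RC_EDIT=0, RC_NEW=1, RC_MOVE=2, RC_LOG=3. Listing new pages then
+-- becomes (a sketch):
+--
+-- SELECT rc_timestamp, rc_title FROM /*$wgDBprefix*/recentchanges
+--   WHERE rc_type = 1 ORDER BY rc_timestamp DESC LIMIT 50;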
diff --git a/maintenance/archives/patch-rename-group.sql b/maintenance/archives/patch-rename-group.sql
new file mode 100644
index 00000000..026b60bd
--- /dev/null
+++ b/maintenance/archives/patch-rename-group.sql
@@ -0,0 +1,10 @@
+-- Rename the group table to groups, since GROUP is a reserved keyword.
+-- It was called group in a few alpha versions.
+
+RENAME TABLE /*$wgDBprefix*/`group` TO /*$wgDBprefix*/groups;
+ALTER TABLE /*$wgDBprefix*/groups
+ CHANGE group_id gr_id int(5) unsigned NOT NULL auto_increment,
+ CHANGE group_name gr_name varchar(50) NOT NULL default '',
+ CHANGE group_description gr_description varchar(255) NOT NULL default '',
+ CHANGE group_rights gr_rights tinyblob;
+
diff --git a/maintenance/archives/patch-rename-user_groups-and_rights.sql b/maintenance/archives/patch-rename-user_groups-and_rights.sql
new file mode 100644
index 00000000..abd59319
--- /dev/null
+++ b/maintenance/archives/patch-rename-user_groups-and_rights.sql
@@ -0,0 +1,9 @@
+
+ALTER TABLE /*$wgDBprefix*/user_groups
+ CHANGE user_id ug_user INT(5) UNSIGNED NOT NULL DEFAULT '0',
+ CHANGE group_id ug_group INT(5) UNSIGNED NOT NULL DEFAULT '0';
+
+ALTER TABLE /*$wgDBprefix*/user_rights
+ CHANGE user_id ur_user INT(5) UNSIGNED NOT NULL,
+ CHANGE user_rights ur_rights TINYBLOB NOT NULL DEFAULT '';
+
diff --git a/maintenance/archives/patch-restructure.sql b/maintenance/archives/patch-restructure.sql
new file mode 100644
index 00000000..53f1836b
--- /dev/null
+++ b/maintenance/archives/patch-restructure.sql
@@ -0,0 +1,147 @@
+-- The Great Restructuring of October 2004
+-- Creates 'page', 'revision' tables and transforms the classic
+-- cur+old into a separate page+revision+text structure.
+--
+-- The pre-conversion 'old' table is renamed to 'text' and used
+-- without internal restructuring to avoid rebuilding the entire
+-- table. (This can be done separately if desired.)
+--
+-- The pre-conversion 'cur' table is now redundant and can be
+-- discarded when done.
+
+CREATE TABLE /*$wgDBprefix*/page (
+ page_id int(8) unsigned NOT NULL auto_increment,
+ page_namespace tinyint NOT NULL,
+ page_title varchar(255) binary NOT NULL,
+ page_restrictions tinyblob NOT NULL default '',
+ page_counter bigint(20) unsigned NOT NULL default '0',
+ page_is_redirect tinyint(1) unsigned NOT NULL default '0',
+ page_is_new tinyint(1) unsigned NOT NULL default '0',
+ page_random real unsigned NOT NULL,
+ page_touched char(14) binary NOT NULL default '',
+ page_latest int(8) unsigned NOT NULL,
+ page_len int(8) unsigned NOT NULL,
+
+ PRIMARY KEY page_id (page_id),
+ UNIQUE INDEX name_title (page_namespace,page_title),
+ INDEX (page_random),
+ INDEX (page_len)
+);
+
+CREATE TABLE /*$wgDBprefix*/revision (
+ rev_id int(8) unsigned NOT NULL auto_increment,
+ rev_page int(8) unsigned NOT NULL,
+ rev_comment tinyblob NOT NULL default '',
+ rev_user int(5) unsigned NOT NULL default '0',
+ rev_user_text varchar(255) binary NOT NULL default '',
+ rev_timestamp char(14) binary NOT NULL default '',
+ rev_minor_edit tinyint(1) unsigned NOT NULL default '0',
+ rev_deleted tinyint(1) unsigned NOT NULL default '0',
+
+
+ PRIMARY KEY rev_page_id (rev_page, rev_id),
+ UNIQUE INDEX rev_id (rev_id),
+ INDEX rev_timestamp (rev_timestamp),
+ INDEX page_timestamp (rev_page,rev_timestamp),
+ INDEX user_timestamp (rev_user,rev_timestamp),
+ INDEX usertext_timestamp (rev_user_text,rev_timestamp)
+);
+
+-- If creating new 'text' table it would look like this:
+--
+-- CREATE TABLE /*$wgDBprefix*/text (
+-- old_id int(8) unsigned NOT NULL auto_increment,
+-- old_text mediumtext NOT NULL default '',
+-- old_flags tinyblob NOT NULL default '',
+--
+-- PRIMARY KEY old_id (old_id)
+-- );
+
+
+-- Lock!
+LOCK TABLES /*$wgDBprefix*/page WRITE, /*$wgDBprefix*/revision WRITE, /*$wgDBprefix*/old WRITE, /*$wgDBprefix*/cur WRITE;
+
+-- Save the last old_id value for later
+SELECT (@maxold:=MAX(old_id)) FROM /*$wgDBprefix*/old;
+
+-- First, copy all current entries into the old table.
+INSERT
+ INTO /*$wgDBprefix*/old
+ (old_namespace,
+ old_title,
+ old_text,
+ old_comment,
+ old_user,
+ old_user_text,
+ old_timestamp,
+ old_minor_edit,
+ old_flags)
+ SELECT
+ cur_namespace,
+ cur_title,
+ cur_text,
+ cur_comment,
+ cur_user,
+ cur_user_text,
+ cur_timestamp,
+ cur_minor_edit,
+ ''
+ FROM /*$wgDBprefix*/cur;
+
+-- Now, copy all old data except the text into revisions
+INSERT
+ INTO /*$wgDBprefix*/revision
+ (rev_id,
+ rev_page,
+ rev_comment,
+ rev_user,
+ rev_user_text,
+ rev_timestamp,
+ rev_minor_edit)
+ SELECT
+ old_id,
+ cur_id,
+ old_comment,
+ old_user,
+ old_user_text,
+ old_timestamp,
+ old_minor_edit
+ FROM /*$wgDBprefix*/old,/*$wgDBprefix*/cur
+ WHERE old_namespace=cur_namespace
+ AND old_title=cur_title;
+
+-- And, copy the cur data into page
+INSERT
+ INTO /*$wgDBprefix*/page
+ (page_id,
+ page_namespace,
+ page_title,
+ page_restrictions,
+ page_counter,
+ page_is_redirect,
+ page_is_new,
+ page_random,
+ page_touched,
+ page_latest)
+ SELECT
+ cur_id,
+ cur_namespace,
+ cur_title,
+ cur_restrictions,
+ cur_counter,
+ cur_is_redirect,
+ cur_is_new,
+ cur_random,
+ cur_touched,
+ rev_id
+ FROM /*$wgDBprefix*/cur,/*$wgDBprefix*/revision
+ WHERE cur_id=rev_page
+ AND rev_timestamp=cur_timestamp
+ AND rev_id > @maxold;
+
+UNLOCK TABLES;
+
+-- Keep the old table around as the text store.
+-- Its extra fields will be ignored, but trimming them is slow
+-- so we won't bother doing it for now.
+ALTER TABLE /*$wgDBprefix*/old RENAME TO /*$wgDBprefix*/text;
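+
+-- A post-conversion sanity check, as a sketch: every page should point
+-- at a revision of its own, so this should return no rows.
+--
+-- SELECT page_id FROM /*$wgDBprefix*/page
+--   LEFT JOIN /*$wgDBprefix*/revision
+--     ON page_latest=rev_id AND page_id=rev_page
+--   WHERE rev_id IS NULL;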
diff --git a/maintenance/archives/patch-rev_deleted.sql b/maintenance/archives/patch-rev_deleted.sql
new file mode 100644
index 00000000..3af0c1d7
--- /dev/null
+++ b/maintenance/archives/patch-rev_deleted.sql
@@ -0,0 +1,11 @@
+--
+-- Add rev_deleted flag to revision table.
+-- Deleted revisions can thus continue to be listed in history
+-- and user contributions, and their text storage doesn't have
+-- to be disturbed.
+--
+-- 2005-03-31
+--
+
+ALTER TABLE /*$wgDBprefix*/revision
+ ADD rev_deleted tinyint(1) unsigned NOT NULL default '0';
diff --git a/maintenance/archives/patch-rev_text_id.sql b/maintenance/archives/patch-rev_text_id.sql
new file mode 100644
index 00000000..44ef438c
--- /dev/null
+++ b/maintenance/archives/patch-rev_text_id.sql
@@ -0,0 +1,17 @@
+--
+-- Adds rev_text_id field to revision table.
+-- This is a key to text.old_id, so that revisions can be stored
+-- for non-save operations without duplicating text, and so that
+-- a back-end storage system can provide its own numbering system
+-- if necessary.
+--
+-- rev.rev_id and text.old_id are no longer assumed to be the same.
+--
+-- 2005-03-28
+--
+
+ALTER TABLE /*$wgDBprefix*/revision
+ ADD rev_text_id int(8) unsigned NOT NULL;
+
+UPDATE /*$wgDBprefix*/revision
+ SET rev_text_id=rev_id;
diff --git a/maintenance/archives/patch-searchindex.sql b/maintenance/archives/patch-searchindex.sql
new file mode 100644
index 00000000..fb54dbbe
--- /dev/null
+++ b/maintenance/archives/patch-searchindex.sql
@@ -0,0 +1,40 @@
+-- Break fulltext search index out to separate table from cur
+-- This is being done mainly to allow us to use InnoDB tables
+-- for the main db while keeping the MyISAM fulltext index for
+-- search.
+
+-- 2002-12-16, 2003-01-25 Brion VIBBER <brion@pobox.com>
+
+-- Creating searchindex table...
+DROP TABLE IF EXISTS /*$wgDBprefix*/searchindex;
+CREATE TABLE /*$wgDBprefix*/searchindex (
+ -- Key to page_id
+ si_page int(8) unsigned NOT NULL,
+
+ -- Munged version of title
+ si_title varchar(255) NOT NULL default '',
+
+ -- Munged version of body text
+ si_text mediumtext NOT NULL default '',
+
+ UNIQUE KEY (si_page)
+
+) TYPE=MyISAM;
+
+-- Copying data into new table...
+INSERT INTO /*$wgDBprefix*/searchindex
+ (si_page,si_title,si_text)
+ SELECT
+ cur_id,cur_ind_title,cur_ind_text
+ FROM /*$wgDBprefix*/cur;
+
+
+-- Creating fulltext index...
+ALTER TABLE /*$wgDBprefix*/searchindex
+ ADD FULLTEXT si_title (si_title),
+ ADD FULLTEXT si_text (si_text);
+
+-- Dropping index columns from cur table.
+ALTER TABLE /*$wgDBprefix*/cur
+ DROP COLUMN cur_ind_title,
+ DROP COLUMN cur_ind_text;
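+
+-- Searches then hit the MyISAM table alone, e.g. (a sketch):
+--
+-- SELECT si_page FROM /*$wgDBprefix*/searchindex
+--   WHERE MATCH(si_text) AGAINST('example query');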
diff --git a/maintenance/archives/patch-ss_images.sql b/maintenance/archives/patch-ss_images.sql
new file mode 100644
index 00000000..e1950eb6
--- /dev/null
+++ b/maintenance/archives/patch-ss_images.sql
@@ -0,0 +1,5 @@
+-- More statistics, for version 1.6
+
+ALTER TABLE /*$wgDBprefix*/site_stats ADD ss_images int(10) default '0';
+SELECT @images := COUNT(*) FROM /*$wgDBprefix*/image;
+UPDATE /*$wgDBprefix*/site_stats SET ss_images=@images;
diff --git a/maintenance/archives/patch-ss_total_articles.sql b/maintenance/archives/patch-ss_total_articles.sql
new file mode 100644
index 00000000..b4a48cf7
--- /dev/null
+++ b/maintenance/archives/patch-ss_total_articles.sql
@@ -0,0 +1,6 @@
+-- Faster statistics, as of 1.4.3
+
+ALTER TABLE /*$wgDBprefix*/site_stats
+ ADD ss_total_pages bigint(20) default -1,
+ ADD ss_users bigint(20) default -1,
+ ADD ss_admins int(10) default -1;
diff --git a/maintenance/archives/patch-templatelinks.sql b/maintenance/archives/patch-templatelinks.sql
new file mode 100644
index 00000000..49bd9c5e
--- /dev/null
+++ b/maintenance/archives/patch-templatelinks.sql
@@ -0,0 +1,19 @@
+--
+-- Track template inclusions.
+--
+CREATE TABLE /*$wgDBprefix*/templatelinks (
+ -- Key to the page_id of the page containing the link.
+ tl_from int(8) unsigned NOT NULL default '0',
+
+ -- Key to page_namespace/page_title of the target page.
+ -- The target page may or may not exist, and due to renames
+ -- and deletions may refer to different page records as time
+ -- goes by.
+ tl_namespace int NOT NULL default '0',
+ tl_title varchar(255) binary NOT NULL default '',
+
+ UNIQUE KEY tl_from(tl_from,tl_namespace,tl_title),
+ KEY (tl_namespace,tl_title)
+
+) TYPE=InnoDB;
+
diff --git a/maintenance/archives/patch-trackbacks.sql b/maintenance/archives/patch-trackbacks.sql
new file mode 100644
index 00000000..4193d058
--- /dev/null
+++ b/maintenance/archives/patch-trackbacks.sql
@@ -0,0 +1,10 @@
+CREATE TABLE /*$wgDBprefix*/trackbacks (
+ tb_id INTEGER AUTO_INCREMENT PRIMARY KEY,
+ tb_page INTEGER REFERENCES page(page_id) ON DELETE CASCADE,
+ tb_title VARCHAR(255) NOT NULL,
+ tb_url VARCHAR(255) NOT NULL,
+ tb_ex TEXT,
+ tb_name VARCHAR(255),
+
+ INDEX (tb_page)
+);
diff --git a/maintenance/archives/patch-transcache.sql b/maintenance/archives/patch-transcache.sql
new file mode 100644
index 00000000..a244bff8
--- /dev/null
+++ b/maintenance/archives/patch-transcache.sql
@@ -0,0 +1,7 @@
+CREATE TABLE /*$wgDBprefix*/transcache (
+ tc_url VARCHAR(255) NOT NULL,
+ tc_contents TEXT,
+ tc_time INT NOT NULL,
+ UNIQUE INDEX tc_url_idx(tc_url)
+) TYPE=InnoDB;
+
diff --git a/maintenance/archives/patch-user-realname.sql b/maintenance/archives/patch-user-realname.sql
new file mode 100644
index 00000000..96edaa43
--- /dev/null
+++ b/maintenance/archives/patch-user-realname.sql
@@ -0,0 +1,5 @@
+-- Add a 'real name' field where users can specify the name they want
+-- used for author attribution or other places where real names matter.
+
+ALTER TABLE user
+ ADD (user_real_name varchar(255) binary NOT NULL default '');
diff --git a/maintenance/archives/patch-user_email_token.sql b/maintenance/archives/patch-user_email_token.sql
new file mode 100644
index 00000000..d4d633b7
--- /dev/null
+++ b/maintenance/archives/patch-user_email_token.sql
@@ -0,0 +1,12 @@
+--
+-- E-mail confirmation token and expiration timestamp,
+-- for verification of e-mail addresses.
+--
+-- 2005-04-25
+--
+
+ALTER TABLE /*$wgDBprefix*/user
+ ADD COLUMN user_email_authenticated CHAR(14) BINARY,
+ ADD COLUMN user_email_token CHAR(32) BINARY,
+ ADD COLUMN user_email_token_expires CHAR(14) BINARY,
+ ADD INDEX (user_email_token);
diff --git a/maintenance/archives/patch-user_groups.sql b/maintenance/archives/patch-user_groups.sql
new file mode 100644
index 00000000..50f99993
--- /dev/null
+++ b/maintenance/archives/patch-user_groups.sql
@@ -0,0 +1,25 @@
+--
+-- User permissions have been broken out to a separate table;
+-- this allows sites with a shared user table to have different
+-- permissions assigned to a user in each project.
+--
+-- This table replaces the old user_rights field which used a
+-- comma-separated blob.
+--
+CREATE TABLE /*$wgDBprefix*/user_groups (
+ -- Key to user_id
+ ug_user int(5) unsigned NOT NULL default '0',
+
+ -- Group names are short symbolic string keys.
+ -- The set of group names is open-ended, though in practice
+ -- only some predefined ones are likely to be used.
+ --
+ -- At runtime $wgGroupPermissions will associate group keys
+ -- with particular permissions. A user will have the combined
+ -- permissions of any group they're explicitly in, plus
+ -- the implicit '*' and 'user' groups.
+ ug_group char(16) NOT NULL default '',
+
+ PRIMARY KEY (ug_user,ug_group),
+ KEY (ug_group)
+) TYPE=InnoDB;
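+
+-- Membership is then managed with plain inserts and deletes (a sketch;
+-- user id 1 and the 'sysop' group are placeholders):
+--
+-- INSERT INTO /*$wgDBprefix*/user_groups (ug_user, ug_group)
+--   VALUES (1, 'sysop');
+-- SELECT ug_group FROM /*$wgDBprefix*/user_groups WHERE ug_user = 1;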
diff --git a/maintenance/archives/patch-user_nameindex.sql b/maintenance/archives/patch-user_nameindex.sql
new file mode 100644
index 00000000..9bf0aab1
--- /dev/null
+++ b/maintenance/archives/patch-user_nameindex.sql
@@ -0,0 +1,13 @@
+--
+-- Change the index on user_name to a unique index to prevent
+-- duplicate registrations from creeping in.
+--
+-- Run maintenance/userDupes.php directly or via the updater first
+-- to clean up any prior duplicate accounts.
+--
+-- Added 2005-06-05
+--
+
+ALTER TABLE /*$wgDBprefix*/user
+  DROP INDEX user_name,
+  ADD UNIQUE INDEX user_name(user_name);
diff --git a/maintenance/archives/patch-user_registration.sql b/maintenance/archives/patch-user_registration.sql
new file mode 100644
index 00000000..65fd99df
--- /dev/null
+++ b/maintenance/archives/patch-user_registration.sql
@@ -0,0 +1,9 @@
+--
+-- New user field for tracking registration time
+-- 2005-12-21
+--
+
+ALTER TABLE /*$wgDBprefix*/user
+ -- Timestamp of account registration.
+ -- Accounts predating this schema addition may contain NULL.
+ ADD user_registration CHAR(14) BINARY;
diff --git a/maintenance/archives/patch-user_rights.sql b/maintenance/archives/patch-user_rights.sql
new file mode 100644
index 00000000..36f0102a
--- /dev/null
+++ b/maintenance/archives/patch-user_rights.sql
@@ -0,0 +1,21 @@
+-- Split user table into two parts:
+-- user
+-- user_rights
+-- The latter contains only the permissions of the user. This way,
+-- you can store the accounts for several wikis in one central
+-- database but keep user rights local to the wiki.
+
+CREATE TABLE /*$wgDBprefix*/user_rights (
+ -- Key to user_id
+ ur_user int(5) unsigned NOT NULL,
+
+ -- Comma-separated list of permission keys
+ ur_rights tinyblob NOT NULL default '',
+
+ UNIQUE KEY ur_user (ur_user)
+
+) TYPE=InnoDB;
+
+INSERT INTO /*$wgDBprefix*/user_rights SELECT user_id,user_rights FROM /*$wgDBprefix*/user;
+
+ALTER TABLE /*$wgDBprefix*/user DROP COLUMN user_rights;
diff --git a/maintenance/archives/patch-user_token.sql b/maintenance/archives/patch-user_token.sql
new file mode 100644
index 00000000..797dc98f
--- /dev/null
+++ b/maintenance/archives/patch-user_token.sql
@@ -0,0 +1,15 @@
+-- user_token patch
+-- 2004-09-23
+
+ALTER TABLE /*$wgDBprefix*/user ADD user_token char(32) binary NOT NULL default '';
+
+UPDATE /*$wgDBprefix*/user SET user_token = concat(
+ substring(rand(),3,4),
+ substring(rand(),3,4),
+ substring(rand(),3,4),
+ substring(rand(),3,4),
+ substring(rand(),3,4),
+ substring(rand(),3,4),
+ substring(rand(),3,4),
+ substring(rand(),3,4)
+);
diff --git a/maintenance/archives/patch-userindex.sql b/maintenance/archives/patch-userindex.sql
new file mode 100644
index 00000000..c039b2f3
--- /dev/null
+++ b/maintenance/archives/patch-userindex.sql
@@ -0,0 +1 @@
+ ALTER TABLE /*$wgDBprefix*/user ADD INDEX ( `user_name` );
\ No newline at end of file
diff --git a/maintenance/archives/patch-userlevels-defaultgroups.sql b/maintenance/archives/patch-userlevels-defaultgroups.sql
new file mode 100644
index 00000000..065653da
--- /dev/null
+++ b/maintenance/archives/patch-userlevels-defaultgroups.sql
@@ -0,0 +1,30 @@
+--
+-- Provide default groups
+-- These should probably be inserted when someone creates a new database
+--
+
+INSERT INTO /*$wgDBprefix*/groups (gr_id,gr_name,gr_description,gr_rights)
+ VALUES (
+ 1,':group-anon-name',':group-anon-desc',
+ 'read,edit,createaccount'
+ );
+INSERT INTO /*$wgDBprefix*/groups (gr_id,gr_name,gr_description,gr_rights)
+ VALUES (
+ 2,':group-loggedin-name',':group-loggedin-desc',
+ 'read,edit,move,upload,validate,createaccount'
+ );
+INSERT INTO /*$wgDBprefix*/groups (gr_id,gr_name,gr_description,gr_rights)
+ VALUES (
+ 3,':group-admin-name',':group-admin-desc',
+ 'read,edit,move,upload,validate,createaccount,delete,undelete,protect,block,upload,asksql,rollback,patrol,editinterface,import'
+ );
+INSERT INTO /*$wgDBprefix*/groups (gr_id,gr_name,gr_description,gr_rights)
+ VALUES (
+ 4,':group-bureaucrat-name',':group-bureaucrat-desc',
+ 'read,edit,move,upload,validate,createaccount,delete,undelete,protect,block,upload,asksql,rollback,patrol,editinterface,import,makesysop'
+ );
+INSERT INTO /*$wgDBprefix*/groups (gr_id,gr_name,gr_description,gr_rights)
+ VALUES (
+ 5,':group-steward-name',':group-steward-desc',
+ 'read,edit,move,upload,validate,createaccount,delete,undelete,protect,block,upload,asksql,rollback,patrol,editinterface,import,makesysop,userrights,grouprights,siteadmin'
+ );
diff --git a/maintenance/archives/patch-userlevels-rights.sql b/maintenance/archives/patch-userlevels-rights.sql
new file mode 100644
index 00000000..7f1cabfc
--- /dev/null
+++ b/maintenance/archives/patch-userlevels-rights.sql
@@ -0,0 +1,5 @@
+-- Oct. 24 2004
+-- Adds the gr_rights field missing from early dev work
+
+-- Add the missing rights blob to the groups table
+ALTER TABLE /*$wgDBprefix*/groups ADD gr_rights tinyblob;
diff --git a/maintenance/archives/patch-userlevels.sql b/maintenance/archives/patch-userlevels.sql
new file mode 100644
index 00000000..ab3a9a7b
--- /dev/null
+++ b/maintenance/archives/patch-userlevels.sql
@@ -0,0 +1,22 @@
+-- Oct. 1st 2004 - Ashar Voultoiz
+-- Implement the new sitelevels
+--
+-- This is under development to provide a showcase in HEAD :o)
+
+-- Hold group name and description
+CREATE TABLE /*$wgDBprefix*/groups (
+ gr_id int(5) unsigned NOT NULL auto_increment,
+ gr_name varchar(50) NOT NULL default '',
+ gr_description varchar(255) NOT NULL default '',
+ gr_rights tinyblob,
+ PRIMARY KEY (gr_id)
+
+) TYPE=InnoDB;
+
+-- Relation table between user and groups
+CREATE TABLE /*$wgDBprefix*/user_groups (
+ ug_user int(5) unsigned NOT NULL default '0',
+ ug_group int(5) unsigned NOT NULL default '0',
+ PRIMARY KEY (ug_user,ug_group)
+
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-usernewtalk.sql b/maintenance/archives/patch-usernewtalk.sql
new file mode 100644
index 00000000..fb8c8655
--- /dev/null
+++ b/maintenance/archives/patch-usernewtalk.sql
@@ -0,0 +1,20 @@
+-- This table stores all the IDs of users whose talk
+-- page has been changed (the respective row is deleted
+-- when the user looks at the page).
+-- The respective column in the user table is no longer
+-- required and therefore dropped.
+
+CREATE TABLE /*$wgDBprefix*/user_newtalk (
+ user_id int(5) NOT NULL default '0',
+ user_ip varchar(40) NOT NULL default '',
+ KEY user_id (user_id),
+ KEY user_ip (user_ip)
+) TYPE=MyISAM;
+
+INSERT INTO
+ /*$wgDBprefix*/user_newtalk (user_id, user_ip)
+ SELECT user_id, ''
+ FROM /*$wgDBprefix*/user
+ WHERE user_newtalk != 0;
+
+ALTER TABLE /*$wgDBprefix*/user DROP COLUMN user_newtalk;
diff --git a/maintenance/archives/patch-usernewtalk2.sql b/maintenance/archives/patch-usernewtalk2.sql
new file mode 100644
index 00000000..477109b7
--- /dev/null
+++ b/maintenance/archives/patch-usernewtalk2.sql
@@ -0,0 +1,6 @@
+CREATE TABLE /*$wgDBprefix*/user_newtalk (
+ user_id int(5) NOT NULL default '0',
+ user_ip varchar(40) NOT NULL default '',
+ INDEX user_id (user_id),
+ INDEX user_ip (user_ip)
+);
diff --git a/maintenance/archives/patch-val_ip.sql b/maintenance/archives/patch-val_ip.sql
new file mode 100644
index 00000000..9214218d
--- /dev/null
+++ b/maintenance/archives/patch-val_ip.sql
@@ -0,0 +1,4 @@
+-- Column added 2005-05-24
+
+ALTER TABLE /*$wgDBprefix*/validate
+ ADD COLUMN val_ip varchar(20) NOT NULL default '';
diff --git a/maintenance/archives/patch-validate.sql b/maintenance/archives/patch-validate.sql
new file mode 100644
index 00000000..3fa7e844
--- /dev/null
+++ b/maintenance/archives/patch-validate.sql
@@ -0,0 +1,13 @@
+-- For article validation
+
+DROP TABLE IF EXISTS /*$wgDBprefix*/validate;
+CREATE TABLE /*$wgDBprefix*/validate (
+ `val_user` int(11) NOT NULL default '0',
+ `val_page` int(11) unsigned NOT NULL default '0',
+ `val_revision` int(11) unsigned NOT NULL default '0',
+ `val_type` int(11) unsigned NOT NULL default '0',
+ `val_value` int(11) default '0',
+ `val_comment` varchar(255) NOT NULL default '',
+ `val_ip` varchar(20) NOT NULL default '',
+ KEY `val_user` (`val_user`,`val_revision`)
+) TYPE=InnoDB;
diff --git a/maintenance/archives/patch-watchlist-null.sql b/maintenance/archives/patch-watchlist-null.sql
new file mode 100644
index 00000000..37ffc163
--- /dev/null
+++ b/maintenance/archives/patch-watchlist-null.sql
@@ -0,0 +1,9 @@
+-- Set up wl_notificationtimestamp with NULL support.
+-- 2005-08-17
+
+ALTER TABLE /*$wgDBprefix*/watchlist
+ CHANGE wl_notificationtimestamp wl_notificationtimestamp varchar(14) binary;
+
+UPDATE /*$wgDBprefix*/watchlist
+ SET wl_notificationtimestamp=NULL
+ WHERE wl_notificationtimestamp='0';
diff --git a/maintenance/archives/patch-watchlist.sql b/maintenance/archives/patch-watchlist.sql
new file mode 100644
index 00000000..adee010b
--- /dev/null
+++ b/maintenance/archives/patch-watchlist.sql
@@ -0,0 +1,30 @@
+-- Convert watchlists to new new format ;)
+
+-- Ids just aren't convenient when what we want is to
+-- treat article and talk pages as equivalent.
+-- Better to use namespace (drop the 1 bit!) and title
+
+-- 2002-12-17 by Brion Vibber <brion@pobox.com>
+-- Affects, and is affected by, changes to SpecialWatchlist.php, User.php,
+-- Article.php, Title.php, SpecialRecentchanges.php
+
+DROP TABLE IF EXISTS watchlist2;
+CREATE TABLE watchlist2 (
+ wl_user int(5) unsigned NOT NULL,
+ wl_namespace tinyint(2) unsigned NOT NULL default '0',
+ wl_title varchar(255) binary NOT NULL default '',
+ UNIQUE KEY (wl_user, wl_namespace, wl_title)
+) TYPE=MyISAM PACK_KEYS=1;
+
+INSERT INTO watchlist2 (wl_user,wl_namespace,wl_title)
+ SELECT DISTINCT wl_user,(cur_namespace | 1) - 1,cur_title
+ FROM watchlist,cur WHERE wl_page=cur_id;
+
+ALTER TABLE watchlist RENAME TO oldwatchlist;
+ALTER TABLE watchlist2 RENAME TO watchlist;
+
+-- Check that the new one is correct, then:
+-- DROP TABLE oldwatchlist;
+
+-- Also should probably drop the ancient and now unused:
+ALTER TABLE user DROP COLUMN user_watch;
diff --git a/maintenance/archives/rebuildRecentchanges.inc b/maintenance/archives/rebuildRecentchanges.inc
new file mode 100644
index 00000000..54f6cb38
--- /dev/null
+++ b/maintenance/archives/rebuildRecentchanges.inc
@@ -0,0 +1,122 @@
+<?php
+/**
+ * Rebuild recent changes table
+ *
+ * @deprecated
+ * @package MediaWiki
+ * @subpackage MaintenanceArchive
+ */
+
+/** */
+function rebuildRecentChangesTable()
+{
+ $sql = "DROP TABLE IF EXISTS recentchanges";
+ wfQuery( $sql );
+
+ $sql = "CREATE TABLE recentchanges (
+ rc_timestamp varchar(14) binary NOT NULL default '',
+ rc_cur_time varchar(14) binary NOT NULL default '',
+ rc_user int(10) unsigned NOT NULL default '0',
+ rc_user_text varchar(255) binary NOT NULL default '',
+ rc_namespace tinyint(3) unsigned NOT NULL default '0',
+ rc_title varchar(255) binary NOT NULL default '',
+ rc_comment varchar(255) binary NOT NULL default '',
+ rc_minor tinyint(3) unsigned NOT NULL default '0',
+ rc_new tinyint(3) unsigned NOT NULL default '0',
+ rc_cur_id int(10) unsigned NOT NULL default '0',
+ rc_this_oldid int(10) unsigned NOT NULL default '0',
+ rc_last_oldid int(10) unsigned NOT NULL default '0',
+ INDEX rc_cur_id (rc_cur_id),
+ INDEX rc_cur_time (rc_cur_time),
+ INDEX rc_timestamp (rc_timestamp),
+ INDEX rc_namespace (rc_namespace),
+ INDEX rc_title (rc_title)
+) TYPE=MyISAM PACK_KEYS=1;";
+ wfQuery( $sql );
+
+ print( "Loading from CUR table...\n" );
+
+ $sql = "INSERT INTO recentchanges (rc_timestamp,rc_cur_time,rc_user," .
+ "rc_user_text,rc_namespace,rc_title,rc_comment,rc_minor,rc_new," .
+ "rc_cur_id,rc_this_oldid,rc_last_oldid) SELECT cur_timestamp," .
+ "cur_timestamp,cur_user,cur_user_text,cur_namespace,cur_title," .
+ "cur_comment,cur_minor_edit,cur_is_new,cur_id,0,0 FROM cur " .
+ "ORDER BY cur_timestamp DESC LIMIT 5000";
+ wfQuery( $sql );
+
+ print( "Loading from OLD table...\n" );
+
+ $sql = "INSERT INTO recentchanges (rc_timestamp,rc_cur_time,rc_user," .
+ "rc_user_text,rc_namespace,rc_title,rc_comment,rc_minor,rc_new," .
+ "rc_cur_id,rc_this_oldid,rc_last_oldid) SELECT old_timestamp,''," .
+ "old_user,old_user_text,old_namespace,old_title,old_comment," .
+ "old_minor_edit,0,0,old_id,0 FROM old ORDER BY old_timestamp " .
+ "DESC LIMIT 5000";
+ wfQuery( $sql );
+
+ $sql = "SELECT rc_timestamp FROM recentchanges " .
+ "ORDER BY rc_timestamp DESC LIMIT 5000,1";
+ $res = wfQuery( $sql );
+ $obj = wfFetchObject( $res );
+ $ts = $obj->rc_timestamp;
+
+ $sql = "DELETE FROM recentchanges WHERE rc_timestamp < '{$ts}'";
+ wfQuery( $sql );
+
+ rebuildRecentChangesTablePass2();
+}
+
+function rebuildRecentChangesTablePass2()
+{
+ $ns = $id = $count = 0;
+ $title = $ct = "";
+
+ print( "Updating links...\n" );
+
+ $sql = "SELECT rc_namespace,rc_title,rc_timestamp FROM recentchanges " .
+ "ORDER BY rc_namespace,rc_title,rc_timestamp DESC";
+ $res = wfQuery( $sql );
+
+ while ( $obj = wfFetchObject( $res ) ) {
+ if ( ! ( $ns == $obj->rc_namespace &&
+ 0 == strcmp( $title, wfStrencode( $obj->rc_title ) ) ) ) {
+
+ $ns = $obj->rc_namespace;
+ $title = wfStrencode( $obj->rc_title );
+
+ $sql = "SELECT cur_id,cur_timestamp FROM cur WHERE " .
+ "cur_namespace={$ns} AND cur_title='{$title}'";
+ $res2 = wfQuery( $sql );
+ $obj2 = wfFetchObject( $res2 );
+
+ $id = $obj2->cur_id;
+ $ct = $obj2->cur_timestamp;
+ }
+ $sql = "SELECT old_id FROM old WHERE old_namespace={$ns} " .
+ "AND old_title='{$title}' AND old_timestamp < '" .
+ "{$obj->rc_timestamp}' ORDER BY old_timestamp DESC LIMIT 1";
+ $res2 = wfQuery( $sql );
+
+ if ( 0 != wfNumRows( $res2 ) ) {
+ $obj2 = wfFetchObject( $res2 );
+
+ $sql = "UPDATE recentchanges SET rc_cur_id={$id},rc_cur_time=" .
+ "'{$ct}',rc_last_oldid={$obj2->old_id} WHERE " .
+ "rc_namespace={$ns} AND rc_title='{$title}' AND " .
+ "rc_timestamp='{$obj->rc_timestamp}'";
+ wfQuery( $sql );
+ } else {
+ $sql = "UPDATE recentchanges SET rc_cur_id={$id},rc_cur_time=" .
+ "'{$ct}' WHERE rc_namespace={$ns} AND rc_title='{$title}' " .
+ "AND rc_timestamp='{$obj->rc_timestamp}'";
+ wfQuery( $sql );
+ }
+
+ if ( 0 == ( ++$count % 500 ) ) {
+ printf( "%d records processed.\n", $count );
+ }
+ }
+}
+
+
+?>
diff --git a/maintenance/archives/upgradeWatchlist.php b/maintenance/archives/upgradeWatchlist.php
new file mode 100644
index 00000000..b4605a50
--- /dev/null
+++ b/maintenance/archives/upgradeWatchlist.php
@@ -0,0 +1,67 @@
+<?php
+/**
+ * @deprecated
+ * @package MediaWiki
+ * @subpackage MaintenanceArchive
+ */
+
+/** */
+print "This script is obsolete!";
+print "It is retained in the source here in case some of its
+code might be useful for ad-hoc conversion tasks, but it is
+not maintained and probably won't even work as is.";
+exit();
+
+# Convert watchlists to new format
+
+global $IP;
+require_once( "../LocalSettings.php" );
+require_once( "$IP/Setup.php" );
+
+$wgTitle = Title::newFromText( "Rebuild links script" );
+set_time_limit(0);
+
+$wgDBuser = "wikiadmin";
+$wgDBpassword = $wgDBadminpassword;
+
+$sql = "DROP TABLE IF EXISTS watchlist";
+wfQuery( $sql, DB_MASTER );
+$sql = "CREATE TABLE watchlist (
+ wl_user int(5) unsigned NOT NULL,
+ wl_page int(8) unsigned NOT NULL,
+ UNIQUE KEY (wl_user, wl_page)
+) TYPE=MyISAM PACK_KEYS=1";
+wfQuery( $sql, DB_MASTER );
+
+$lc = new LinkCache;
+
+# Now, convert!
+$sql = "SELECT user_id,user_watch FROM user";
+$res = wfQuery( $sql, DB_SLAVE );
+$nu = wfNumRows( $res );
+$sql = "INSERT into watchlist (wl_user,wl_page) VALUES ";
+$i = $n = 0;
+while( $row = wfFetchObject( $res ) ) {
+ $list = explode( "\n", $row->user_watch );
+ $bits = array();
+ foreach( $list as $title ) {
+ if( $id = $lc->addLink( $title ) and ! $bits[$id]++) {
+ $sql .= ($i++ ? "," : "") . "({$row->user_id},{$id})";
+ }
+ }
+ if( ($n++ % 100) == 0 ) echo "$n of $nu users done...\n";
+}
+echo "$n users done.\n";
+if( $i ) {
+ wfQuery( $sql, DB_MASTER );
+}
+
+
+# Add index
+# is this necessary?
+$sql = "ALTER TABLE watchlist
+ ADD INDEX wl_user (wl_user),
+ ADD INDEX wl_page (wl_page)";
+#wfQuery( $sql, DB_MASTER );
+
+?>
diff --git a/maintenance/attachLatest.php b/maintenance/attachLatest.php
new file mode 100644
index 00000000..024a4fac
--- /dev/null
+++ b/maintenance/attachLatest.php
@@ -0,0 +1,73 @@
+<?php
+// Quick hack job to fix damaged imports on Wikisource:
+// page records have page_latest wrong
+
+/**
+ * Copyright (C) 2005 Brion Vibber <brion@pobox.com>
+ * http://www.mediawiki.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+require_once( 'commandLine.inc' );
+
+$fixit = isset( $options['fix'] );
+$fname = 'attachLatest';
+
+echo "Looking for pages with page_latest set to 0...\n";
+$dbw =& wfGetDB( DB_MASTER );
+$result = $dbw->select( 'page',
+ array( 'page_id', 'page_namespace', 'page_title' ),
+ array( 'page_latest' => 0 ),
+ $fname );
+
+$n = 0;
+while( $row = $dbw->fetchObject( $result ) ) {
+ $pageId = intval( $row->page_id );
+ $title = Title::makeTitle( $row->page_namespace, $row->page_title );
+ $name = $title->getPrefixedText();
+ $latestTime = $dbw->selectField( 'revision',
+ 'MAX(rev_timestamp)',
+ array( 'rev_page' => $pageId ),
+ $fname );
+ if( !$latestTime ) {
+ echo "$wgDBname $pageId [[$name]] can't find latest rev time?!\n";
+ continue;
+ }
+
+ $revision = Revision::loadFromTimestamp( $dbw, $title, $latestTime );
+ if( is_null( $revision ) ) {
+ echo "$wgDBname $pageId [[$name]] latest time $latestTime, can't find revision id\n";
+ continue;
+ }
+ $id = $revision->getId();
+ echo "$wgDBname $pageId [[$name]] latest time $latestTime, rev id $id\n";
+ if( $fixit ) {
+ $article = new Article( $title );
+ $article->updateRevisionOn( $dbw, $revision );
+ }
+ $n++;
+}
+$dbw->freeResult( $result );
+echo "Done! Processed $n pages.\n";
+if( !$fixit ) {
+ echo "This was a dry run; rerun with --fix to update page_latest.\n";
+}
+
+?>
diff --git a/maintenance/attribute.php b/maintenance/attribute.php
new file mode 100644
index 00000000..3326180c
--- /dev/null
+++ b/maintenance/attribute.php
@@ -0,0 +1,105 @@
+<?php
+/**
+ * Script for re-attributing edits
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+require_once( "commandLine.inc" );
+
+# Parameters
+if ( count( $args ) < 2 ) {
+ print "Not enough parameters\n";
+ if ( $wgWikiFarm ) {
+ print "Usage: php attribute.php <language> <site> <source> <destination>\n";
+ } else {
+ print "Usage: php attribute.php <source> <destination>\n";
+ }
+ exit;
+}
+
+$source = $args[0];
+$dest = $args[1];
+
+$dbr =& wfGetDB( DB_SLAVE );
+extract( $dbr->tableNames( 'page', 'revision','user' ));
+$eSource = $dbr->strencode( $source );
+$eDest = $dbr->strencode( $dest );
+
+# Get user id
+$res = $dbr->query( "SELECT user_id FROM $user WHERE user_name='$eDest'" );
+$row = $dbr->fetchObject( $res );
+if ( !$row ) {
+ print "Warning: the target name \"$dest\" does not exist";
+ $uid = 0;
+} else {
+ $uid = $row->user_id;
+}
+
+# Initialise files
+$logfile = fopen( "attribute.log", "a" );
+$sqlfile = fopen( "attribute.sql", "a" );
+
+fwrite( $logfile, "* $source &rarr; $dest\n" );
+
+fwrite( $sqlfile,
+"-- Changing attribution SQL file
+-- Generated with attribute.php
+-- $source -> $dest ($uid)
+");
+
+$omitTitle = "Wikipedia:Changing_attribution_for_an_edit";
+
+# Get revisions
+print "\nPage revisions\n\n";
+
+$res = $dbr->query( "SELECT page_namespace, page_title, rev_id, rev_timestamp
+FROM $revision,$page
+WHERE rev_user_text='$eSource' and rev_page=page_id" );
+$row = $dbr->fetchObject( $res );
+
+if ( $row ) {
+/*
+ if ( $row->old_title=='Votes_for_deletion' && $row->old_namespace == 4 ) {
+ # We don't have that long
+ break;
+ }
+*/
+ fwrite( $logfile, "**Revision IDs: " );
+ fwrite( $sqlfile, "UPDATE $revision SET rev_user=$uid, rev_user_text='$eDest' WHERE rev_id IN (\n" );
+
+ for ( $first=true; $row; $row = $dbr->fetchObject( $res ) ) {
+ $title = Title::makeTitle( $row->page_namespace, $row->page_title );
+ $fullTitle = $title->getPrefixedDbKey();
+ if ( $fullTitle == $omitTitle ) {
+ continue;
+ }
+
+ print "$fullTitle\n";
+ $url = $title->getFullUrl( "oldid={$row->rev_id}" );
+
+ # Output
+ fwrite( $sqlfile, " " );
+ if ( $first ) {
+ $first = false;
+ } else {
+ fwrite( $sqlfile, ", " );
+ fwrite( $logfile, ", " );
+ }
+
+ fwrite( $sqlfile, "{$row->rev_id} -- $url\n" );
+ fwrite( $logfile, "[$url {$row->rev_id}]" );
+
+ }
+ fwrite( $sqlfile, ");\n" );
+ fwrite( $logfile, "\n" );
+}
+
+print "\n";
+
+fclose( $sqlfile );
+fclose( $logfile );
+
+?>
diff --git a/maintenance/backup.inc b/maintenance/backup.inc
new file mode 100644
index 00000000..d3603bd1
--- /dev/null
+++ b/maintenance/backup.inc
@@ -0,0 +1,296 @@
+<?php
+/**
+ * Copyright (C) 2005 Brion Vibber <brion@pobox.com>
+ * http://www.mediawiki.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+class DumpDBZip2Output extends DumpPipeOutput {
+ function DumpDBZip2Output( $file ) {
+ parent::DumpPipeOutput( "dbzip2", $file );
+ }
+}
+
+class BackupDumper {
+ var $reportingInterval = 100;
+ var $reporting = true;
+ var $pageCount = 0;
+ var $revCount = 0;
+ var $server = null; // use default
+ var $pages = null; // all pages
+ var $skipHeader = false; // don't output <mediawiki> and <siteinfo>
+ var $skipFooter = false; // don't output </mediawiki>
+ var $startId = 0;
+ var $endId = 0;
+ var $sink = null; // Output filters
+ var $stubText = false; // include rev_text_id instead of text; for 2-pass dump
+
+ function BackupDumper( $args ) {
+ $this->stderr = fopen( "php://stderr", "wt" );
+
+ // Built-in output and filter plugins
+ $this->registerOutput( 'file', 'DumpFileOutput' );
+ $this->registerOutput( 'gzip', 'DumpGZipOutput' );
+ $this->registerOutput( 'bzip2', 'DumpBZip2Output' );
+ $this->registerOutput( 'dbzip2', 'DumpDBZip2Output' );
+ $this->registerOutput( '7zip', 'Dump7ZipOutput' );
+
+ $this->registerFilter( 'latest', 'DumpLatestFilter' );
+ $this->registerFilter( 'notalk', 'DumpNotalkFilter' );
+ $this->registerFilter( 'namespace', 'DumpNamespaceFilter' );
+
+ $this->sink = $this->processArgs( $args );
+ }
+
+ /**
+ * @param string $name
+ * @param string $class name of output filter plugin class
+ */
+ function registerOutput( $name, $class ) {
+ $this->outputTypes[$name] = $class;
+ }
+
+ /**
+ * @param string $name
+ * @param string $class name of filter plugin class
+ */
+ function registerFilter( $name, $class ) {
+ $this->filterTypes[$name] = $class;
+ }
+
+ /**
+ * Load a plugin and register it
+ * @param string $class Name of plugin class; must have a static 'register'
+ * method that takes a BackupDumper as a parameter.
+ * @param string $file Full or relative path to the PHP file to load, or empty
+ */
+ function loadPlugin( $class, $file ) {
+ if( $file != '' ) {
+ require_once( $file );
+ }
+ $register = array( $class, 'register' );
+ call_user_func_array( $register, array( &$this ) );
+ }
+
+ /**
+ * @param array $args
+ * @return array
+ * @static
+ */
+ function processArgs( $args ) {
+ $sink = null;
+ $sinks = array();
+ foreach( $args as $arg ) {
+ if( preg_match( '/^--(.+?)(?:=(.+?)(?::(.+?))?)?$/', $arg, $matches ) ) {
+ @list( $full, $opt, $val, $param ) = $matches;
+ switch( $opt ) {
+ case "plugin":
+ $this->loadPlugin( $val, $param );
+ break;
+ case "output":
+ if( !is_null( $sink ) ) {
+ $sinks[] = $sink;
+ }
+ if( !isset( $this->outputTypes[$val] ) ) {
+ wfDie( "Unrecognized output sink type '$val'\n" );
+ }
+ $type = $this->outputTypes[$val];
+ $sink = new $type( $param );
+ break;
+ case "filter":
+ if( is_null( $sink ) ) {
+ $this->progress( "Warning: assuming stdout for filter output\n" );
+ $sink = new DumpOutput();
+ }
+ if( !isset( $this->filterTypes[$val] ) ) {
+ wfDie( "Unrecognized filter type '$val'\n" );
+ }
+ $type = $this->filterTypes[$val];
+ $filter = new $type( $sink, $param );
+
+ // references are lame in php...
+ unset( $sink );
+ $sink = $filter;
+
+ break;
+ case "report":
+ $this->reportingInterval = intval( $val );
+ break;
+ case "server":
+ $this->server = $val;
+ break;
+ case "force-normal":
+ if( !function_exists( 'utf8_normalize' ) ) {
+ dl( "php_utfnormal.so" );
+ if( !function_exists( 'utf8_normalize' ) ) {
+ wfDie( "Failed to load UTF-8 normalization extension. " .
+ "Install or remove --force-normal parameter to use slower code.\n" );
+ }
+ }
+ break;
+ default:
+ $this->processOption( $opt, $val, $param );
+ }
+ }
+ }
+
+ if( is_null( $sink ) ) {
+ $sink = new DumpOutput();
+ }
+ $sinks[] = $sink;
+
+ if( count( $sinks ) > 1 ) {
+ return new DumpMultiWriter( $sinks );
+ } else {
+ return $sink;
+ }
+ }
+
+ function processOption( $opt, $val, $param ) {
+ // extension point for subclasses to add options
+ }
+
+ function dump( $history, $text = MW_EXPORT_TEXT ) {
+ # This shouldn't happen if on console... ;)
+ header( 'Content-type: text/html; charset=UTF-8' );
+
+ # Notice messages will foul up your XML output even if they're
+ # relatively harmless.
+ ini_set( 'display_errors', false );
+
+ $this->initProgress( $history );
+
+ $db =& $this->backupDb();
+ $exporter = new WikiExporter( $db, $history, MW_EXPORT_STREAM, $text );
+
+ $wrapper = new ExportProgressFilter( $this->sink, $this );
+ $exporter->setOutputSink( $wrapper );
+
+ if( !$this->skipHeader )
+ $exporter->openStream();
+
+ if( is_null( $this->pages ) ) {
+ if( $this->startId || $this->endId ) {
+ $exporter->pagesByRange( $this->startId, $this->endId );
+ } else {
+ $exporter->allPages();
+ }
+ } else {
+ $exporter->pagesByName( $this->pages );
+ }
+
+ if( !$this->skipFooter )
+ $exporter->closeStream();
+
+ $this->report( true );
+ }
+
+ /**
+ * Initialise starting time and maximum revision count.
+ * We'll make ETA calculations based on progress, assuming a relatively
+ * constant per-revision rate.
+ * @param int $history MW_EXPORT_CURRENT or MW_EXPORT_FULL
+ */
+ function initProgress( $history = MW_EXPORT_FULL ) {
+ $table = ($history == MW_EXPORT_CURRENT) ? 'page' : 'revision';
+ $field = ($history == MW_EXPORT_CURRENT) ? 'page_id' : 'rev_id';
+
+ $dbr =& wfGetDB( DB_SLAVE );
+ $this->maxCount = $dbr->selectField( $table, "MAX($field)", '', 'BackupDumper::dump' );
+ $this->startTime = wfTime();
+ }
+
+ function &backupDb() {
+ global $wgDBadminuser, $wgDBadminpassword;
+ global $wgDBname, $wgDebugDumpSql;
+ $flags = ($wgDebugDumpSql ? DBO_DEBUG : 0) | DBO_DEFAULT; // god-damn hack
+ $db =& new Database( $this->backupServer(), $wgDBadminuser, $wgDBadminpassword, $wgDBname, false, $flags );
+ $timeout = 3600 * 24;
+ $db->query( "SET net_read_timeout=$timeout" );
+ $db->query( "SET net_write_timeout=$timeout" );
+ return $db;
+ }
+
+ function backupServer() {
+ global $wgDBserver;
+ return $this->server
+ ? $this->server
+ : $wgDBserver;
+ }
+
+ function reportPage() {
+ $this->pageCount++;
+ }
+
+ function revCount() {
+ $this->revCount++;
+ $this->report();
+ }
+
+ function report( $final = false ) {
+ if( $final xor ( $this->revCount % $this->reportingInterval == 0 ) ) {
+ $this->showReport();
+ }
+ }
+
+ function showReport() {
+ if( $this->reporting ) {
+ $delta = wfTime() - $this->startTime;
+ $now = wfTimestamp( TS_DB );
+ if( $delta ) {
+ $rate = $this->pageCount / $delta;
+ $revrate = $this->revCount / $delta;
+ $portion = $this->revCount / $this->maxCount;
+ $eta = $this->startTime + $delta / $portion;
+ $etats = wfTimestamp( TS_DB, intval( $eta ) );
+ } else {
+ $rate = '-';
+ $revrate = '-';
+ $etats = '-';
+ }
+ global $wgDBname;
+ $this->progress( sprintf( "%s: %s %d pages (%0.3f/sec), %d revs (%0.3f/sec), ETA %s [max %d]",
+ $now, $wgDBname, $this->pageCount, $rate, $this->revCount, $revrate, $etats, $this->maxCount ) );
+ }
+ }
+
+ function progress( $string ) {
+ fwrite( $this->stderr, $string . "\n" );
+ }
+}
+
+class ExportProgressFilter extends DumpFilter {
+ function ExportProgressFilter( &$sink, &$progress ) {
+ parent::DumpFilter( $sink );
+ $this->progress = $progress;
+ }
+
+ function writeClosePage( $string ) {
+ parent::writeClosePage( $string );
+ $this->progress->reportPage();
+ }
+
+ function writeRevision( $rev, $string ) {
+ parent::writeRevision( $rev, $string );
+ $this->progress->revCount();
+ }
+}
+
+?>
diff --git a/maintenance/backupPrefetch.inc b/maintenance/backupPrefetch.inc
new file mode 100644
index 00000000..413247d7
--- /dev/null
+++ b/maintenance/backupPrefetch.inc
@@ -0,0 +1,203 @@
+<?php
+
+// Some smart guy removed XMLReader's global constants from PHP 5.1
+// and replaced them with class constants. Breaking source compatibility
+// is SUPER awesome, and I love languages which do this constantly!
+$xmlReaderConstants = array(
+ "NONE",
+ "ELEMENT",
+ "ATTRIBUTE",
+ "TEXT",
+ "CDATA",
+ "ENTITY_REF",
+ "ENTITY",
+ "PI",
+ "COMMENT",
+ "DOC",
+ "DOC_TYPE",
+ "DOC_FRAGMENT",
+ "NOTATION",
+ "WHITESPACE",
+ "SIGNIFICANT_WHITESPACE",
+ "END_ELEMENT",
+ "END_ENTITY",
+ "XML_DECLARATION",
+ "LOADDTD",
+ "DEFAULTATTRS",
+ "VALIDATE",
+ "SUBST_ENTITIES" );
+foreach( $xmlReaderConstants as $name ) {
+ $fullName = "XMLREADER_$name";
+ $newName = "XMLReader::$name";
+ if( !defined( $fullName ) ) {
+ if( defined( $newName ) ) {
+ define( $fullName, constant( $newName ) );
+ } else {
+ // broken or missing the extension...
+ }
+ }
+}
+
+/**
+ * Readahead helper for making large MediaWiki data dumps;
+ * reads in a previous XML dump to sequentially prefetch text
+ * records already normalized and decompressed.
+ *
+ * This can save load on the external database servers, hopefully.
+ *
+ * Assumes that dumps will be recorded in the canonical order:
+ * - ascending by page_id
+ * - ascending by rev_id within each page
+ * - text contents are immutable and should not change once
+ * recorded, so the previous dump is a reliable source
+ *
+ * Requires PHP 5 and the XMLReader PECL extension.
+ */
+class BaseDump {
+ var $reader = null;
+ var $atEnd = false;
+ var $atPageEnd = false;
+ var $lastPage = 0;
+ var $lastRev = 0;
+
+ function BaseDump( $infile ) {
+ $this->reader = new XMLReader();
+ $this->reader->open( $infile );
+ }
+
+ /**
+ * Attempts to fetch the text of a particular page revision
+ * from the dump stream. May return null if the page is
+ * unavailable.
+ *
+ * @param int $page ID number of page to read
+ * @param int $rev ID number of revision to read
+ * @return string or null
+ */
+ function prefetch( $page, $rev ) {
+ $page = intval( $page );
+ $rev = intval( $rev );
+ while( $this->lastPage < $page && !$this->atEnd ) {
+ $this->debug( "BaseDump::prefetch at page $this->lastPage, looking for $page" );
+ $this->nextPage();
+ }
+ if( $this->lastPage > $page || $this->atEnd ) {
+ $this->debug( "BaseDump::prefetch already past page $page looking for rev $rev [$this->lastPage, $this->lastRev]" );
+ return null;
+ }
+ while( $this->lastRev < $rev && !$this->atEnd && !$this->atPageEnd ) {
+ $this->debug( "BaseDump::prefetch at page $this->lastPage, rev $this->lastRev, looking for $page, $rev" );
+ $this->nextRev();
+ }
+ if( $this->lastRev == $rev && !$this->atEnd ) {
+ $this->debug( "BaseDump::prefetch hit on $page, $rev [$this->lastPage, $this->lastRev]" );
+ return $this->nextText();
+ } else {
+ $this->debug( "BaseDump::prefetch already past rev $rev on page $page [$this->lastPage, $this->lastRev]" );
+ return null;
+ }
+ }
+
+ function debug( $str ) {
+ wfDebug( $str . "\n" );
+ //global $dumper;
+ //$dumper->progress( $str );
+ }
+
+ /**
+ * @access private
+ */
+ function nextPage() {
+ if( $this->skipTo( 'page', 'mediawiki' ) ) {
+ if( $this->skipTo( 'id' ) ) {
+ $this->lastPage = intval( $this->nodeContents() );
+ $this->lastRev = 0;
+ $this->atPageEnd = false;
+ }
+ } else {
+ $this->atEnd = true;
+ }
+ }
+
+ /**
+ * @access private
+ */
+ function nextRev() {
+ if( $this->skipTo( 'revision' ) ) {
+ if( $this->skipTo( 'id' ) ) {
+ $this->lastRev = intval( $this->nodeContents() );
+ }
+ } else {
+ $this->atPageEnd = true;
+ }
+ }
+
+ /**
+ * @access private
+ */
+ function nextText() {
+ $this->skipTo( 'text' );
+ return strval( $this->nodeContents() );
+ }
+
+ /**
+ * @access private
+ */
+ function skipTo( $name, $parent='page' ) {
+ if( $this->atEnd ) {
+ return false;
+ }
+ while( $this->reader->read() ) {
+ if( $this->reader->nodeType == XMLREADER_ELEMENT &&
+ $this->reader->name == $name ) {
+ return true;
+ }
+ if( $this->reader->nodeType == XMLREADER_END_ELEMENT &&
+ $this->reader->name == $parent ) {
+ $this->debug( "BaseDump::skipTo found </$parent> searching for <$name>" );
+ return false;
+ }
+ }
+ return $this->close();
+ }
+
+ /**
+ * Shouldn't something like this be built into XMLReader?
+ * Fetches text contents of the current element, assuming
+ * no sub-elements or such scary things.
+ * @return string
+ * @access private
+ */
+ function nodeContents() {
+ if( $this->atEnd ) {
+ return null;
+ }
+ if( $this->reader->isEmptyElement ) {
+ return "";
+ }
+ $buffer = "";
+ while( $this->reader->read() ) {
+ switch( $this->reader->nodeType ) {
+ case XMLREADER_TEXT:
+// case XMLREADER_WHITESPACE:
+ case XMLREADER_SIGNIFICANT_WHITESPACE:
+ $buffer .= $this->reader->value;
+ break;
+ case XMLREADER_END_ELEMENT:
+ return $buffer;
+ }
+ }
+ return $this->close();
+ }
+
+ /**
+ * @access private
+ */
+ function close() {
+ $this->reader->close();
+ $this->atEnd = true;
+ return null;
+ }
+}
+
+?>
diff --git a/maintenance/benchmarkPurge.php b/maintenance/benchmarkPurge.php
new file mode 100644
index 00000000..69127681
--- /dev/null
+++ b/maintenance/benchmarkPurge.php
@@ -0,0 +1,65 @@
+<?php
+/**
+ * Squid purge benchmark script
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+require_once( "commandLine.inc" );
+
+/** @todo document */
+function benchSquid( $urls, $trials = 1 ) {
+ $start = wfTime();
+ for( $i = 0; $i < $trials; $i++) {
+ SquidUpdate::purge( $urls );
+ }
+ $delta = wfTime() - $start;
+ $pertrial = $delta / $trials;
+ $pertitle = $pertrial / count( $urls );
+ return sprintf( "%4d titles in %6.2fms (%6.2fms each)",
+ count( $urls ), $pertrial * 1000.0, $pertitle * 1000.0 );
+}
+
+/** @todo document */
+function randomUrlList( $length ) {
+ $list = array();
+ for( $i = 0; $i < $length; $i++ ) {
+ $list[] = randomUrl();
+ }
+ return $list;
+}
+
+/** @todo document */
+function randomUrl() {
+ global $wgServer, $wgArticlePath;
+ return $wgServer . str_replace( '$1', randomTitle(), $wgArticlePath );
+}
+
+/** @todo document */
+function randomTitle() {
+ $str = '';
+ $length = mt_rand( 1, 20 );
+ for( $i = 0; $i < $length; $i++ ) {
+ $str .= chr( mt_rand( ord('a'), ord('z') ) );
+ }
+ return ucfirst( $str );
+}
+
+if( !$wgUseSquid ) {
+ wfDie( "Squid purge benchmark doesn't do much without squid support on.\n" );
+} else {
+ printf( "There are %d defined squid servers:\n", count( $wgSquidServers ) );
+ #echo implode( "\n", $wgSquidServers ) . "\n";
+ if( isset( $options['count'] ) ) {
+ $lengths = array( intval( $options['count'] ) );
+ } else {
+ $lengths = array( 1, 10, 100 );
+ }
+ foreach( $lengths as $length ) {
+ $urls = randomUrlList( $length );
+ $trial = benchSquid( $urls );
+ print "$trial\n";
+ }
+}
+?> \ No newline at end of file
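Typical invocations, assuming $wgUseSquid is enabled and $wgSquidServers is populated; without --count the script runs its built-in 1/10/100-title series:

    php benchmarkPurge.php            # time purges of 1, 10 and 100 random URLs
    php benchmarkPurge.php --count=50 # time a single batch of 50 random URLs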
diff --git a/maintenance/build-intl-wiki.sql b/maintenance/build-intl-wiki.sql
new file mode 100644
index 00000000..f094c8b7
--- /dev/null
+++ b/maintenance/build-intl-wiki.sql
@@ -0,0 +1,31 @@
+-- Experimental: create shared international database
+-- for new interlinking code.
+--
+
+CREATE DATABASE intl;
+
+GRANT DELETE,INSERT,SELECT,UPDATE ON intl.*
+TO wikiuser@'%' IDENTIFIED BY 'userpass';
+GRANT DELETE,INSERT,SELECT,UPDATE ON intl.*
+TO wikiuser@localhost IDENTIFIED BY 'userpass';
+GRANT DELETE,INSERT,SELECT,UPDATE ON intl.*
+TO wikiuser@localhost.localdomain IDENTIFIED BY 'userpass';
+
+USE intl;
+
+CREATE TABLE ilinks (
+ lang_from varchar(5) default NULL,
+ lang_to varchar(5) default NULL,
+ title_from tinyblob,
+ title_to tinyblob,
+ target_exists tinyint(1) default NULL
+) TYPE=MyISAM;
+
+CREATE TABLE recentchanges (
+ user_name tinyblob,
+ user_lang varchar(5) default NULL,
+ date timestamp(14) NOT NULL,
+ message tinyblob
+) TYPE=MyISAM;
+
+
diff --git a/maintenance/changePassword.php b/maintenance/changePassword.php
new file mode 100644
index 00000000..591a82b3
--- /dev/null
+++ b/maintenance/changePassword.php
@@ -0,0 +1,53 @@
+<?php
+/**
+ * Change the password of a given user
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ *
+ * @author Ævar Arnfjörð Bjarmason <avarab@gmail.com>
+ * @copyright Copyright © 2005, Ævar Arnfjörð Bjarmason
+ * @license http://www.gnu.org/copyleft/gpl.html GNU General Public License 2.0 or later
+ */
+
+class ChangePassword {
+ var $dbw;
+ var $user, $password;
+
+ function ChangePassword( $user, $password ) {
+ $this->user = User::newFromName( $user );
+ $this->password = $password;
+
+ $this->dbw =& wfGetDB( DB_MASTER );
+ }
+
+ function main() {
+ $fname = 'ChangePassword::main';
+
+ $this->dbw->update( 'user',
+ array(
+ 'user_password' => wfEncryptPassword( $this->user->getID(), $this->password )
+ ),
+ array(
+ 'user_id' => $this->user->getID()
+ ),
+ $fname
+ );
+ }
+}
+
+$optionsWithArgs = array( 'user', 'password' );
+require_once 'commandLine.inc';
+
+if( in_array( '--help', $argv ) )
+ wfDie(
+ "Usage: php changePassword.php [--user=user --password=password | --help]\n" .
+ "\toptions:\n" .
+ "\t\t--help\tshow this message\n" .
+ "\t\t--user\tthe username to operate on\n" .
+ "\t\t--password\tthe password to use\n"
+ );
+
+$cp = new ChangePassword( @$options['user'], @$options['password'] );
+$cp->main();
+?>
diff --git a/maintenance/changeuser.sql b/maintenance/changeuser.sql
new file mode 100644
index 00000000..ad1c6da6
--- /dev/null
+++ b/maintenance/changeuser.sql
@@ -0,0 +1,12 @@
+set @oldname = 'At18';
+set @newname = 'Alfio';
+
+update low_priority /*$wgDBprefix*/user set user_name=@newname where user_name=@oldname;
+update low_priority /*$wgDBprefix*/user_newtalk set user_ip=@newname where user_ip=@oldname;
+update low_priority /*$wgDBprefix*/cur set cur_user_text=@newname where cur_user_text=@oldname;
+update low_priority /*$wgDBprefix*/old set old_user_text=@newname where old_user_text=@oldname;
+update low_priority /*$wgDBprefix*/archive set ar_user_text=@newname where ar_user_text=@oldname;
+update low_priority /*$wgDBprefix*/ipblocks set ipb_address=@newname where ipb_address=@oldname;
+update low_priority /*$wgDBprefix*/oldimage set oi_user_text=@newname where oi_user_text=@oldname;
+update low_priority /*$wgDBprefix*/recentchanges set rc_user_text=@newname where rc_user_text=@oldname;
+
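To apply the rename script above, edit @oldname and @newname, replace the /*$wgDBprefix*/ comments with the real table prefix (or delete them on an unprefixed wiki), and pipe the file into mysql. The database and account names here are placeholders:

    mysql -u wikiuser -p wikidb < changeuser.sql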
diff --git a/maintenance/checkUsernames.php b/maintenance/checkUsernames.php
new file mode 100644
index 00000000..b577ebc6
--- /dev/null
+++ b/maintenance/checkUsernames.php
@@ -0,0 +1,37 @@
+<?php
+error_reporting(E_ALL ^ E_NOTICE);
+require_once 'commandLine.inc';
+
+class checkUsernames {
+ var $stderr, $log;
+
+ function checkUsernames() {
+ $this->stderr = fopen( 'php://stderr', 'wt' );
+ $this->log = fopen( '/home/wikipedia/logs/checkUsernames.log', 'at' );
+ }
+ function main() {
+ global $wgDBname;
+ $fname = 'checkUsernames::main';
+
+ $dbr =& wfGetDB( DB_SLAVE );
+
+ $res = $dbr->select( 'user',
+ array( 'user_id', 'user_name' ),
+ null,
+ $fname
+ );
+
+ #fwrite( $this->stderr, "Checking $wgDBname\n" );
+ while ( $row = $dbr->fetchObject( $res ) ) {
+ if ( ! User::isValidUserName( $row->user_name ) ) {
+ $out = sprintf( "%s: %6d: '%s'\n", $wgDBname, $row->user_id, $row->user_name );
+ fwrite( $this->stderr, $out );
+ fwrite( $this->log, $out );
+ }
+ }
+ }
+}
+
+$cun = new checkUsernames();
+$cun->main();
+?>
diff --git a/maintenance/checktrans.php b/maintenance/checktrans.php
new file mode 100644
index 00000000..ebab4c7d
--- /dev/null
+++ b/maintenance/checktrans.php
@@ -0,0 +1,30 @@
+<?php
+/**
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * Check to see if all messages have been translated into the selected language.
+ * To run this script, you must have a working installation, and it checks the
+ * selected language of that installation.
+ */
+
+/** */
+require_once('commandLine.inc');
+
+if ( 'en' == $wgLanguageCode ) {
+ print "Current selected language is English. Cannot check translations.\n";
+ exit();
+}
+
+$count = $total = 0;
+$msgarray = 'wgAllMessages' . ucfirst( $wgLanguageCode );
+
+foreach ( $wgAllMessagesEn as $code => $msg ) {
+ ++$total;
+ if ( ! array_key_exists( $code, $$msgarray ) ) {
+ print "'{$code}' => \"$msg\",\n";
+ ++$count;
+ }
+}
+
+print "{$count} messages of {$total} not translated.\n";
+?>
diff --git a/maintenance/cleanupCaps.php b/maintenance/cleanupCaps.php
new file mode 100644
index 00000000..afcd1b33
--- /dev/null
+++ b/maintenance/cleanupCaps.php
@@ -0,0 +1,158 @@
+<?php
+/*
+ * Script to clean up broken page links when somebody turns on $wgCapitalLinks.
+ *
+ * Usage: php cleanupCaps.php [--dry-run]
+ * Options:
+ * --dry-run don't actually try moving them
+ *
+ * Copyright (C) 2005 Brion Vibber <brion@pobox.com>
+ * http://www.mediawiki.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * @author Brion Vibber <brion at pobox.com>
+ * @package MediaWiki
+ * @subpackage maintenance
+ */
+
+$options = array( 'dry-run' );
+
+require_once( 'commandLine.inc' );
+require_once( 'FiveUpgrade.inc' );
+
+class CapsCleanup extends FiveUpgrade {
+ function CapsCleanup( $dryrun = false, $namespace=0 ) {
+ parent::FiveUpgrade();
+
+ $this->maxLag = 10; # if slaves are lagged more than 10 secs, wait
+ $this->dryrun = $dryrun;
+ $this->namespace = intval( $namespace );
+ }
+
+ function cleanup() {
+ global $wgCapitalLinks;
+ if( $wgCapitalLinks ) {
+ echo "\$wgCapitalLinks is on -- no need for caps links cleanup.\n";
+ return false;
+ }
+
+ $this->runTable( 'page', 'WHERE page_namespace=' . $this->namespace,
+ array( &$this, 'processPage' ) );
+ }
+
+ function init( $count, $table ) {
+ $this->processed = 0;
+ $this->updated = 0;
+ $this->count = $count;
+ $this->startTime = wfTime();
+ $this->table = $table;
+ }
+
+ function progress( $updated ) {
+ $this->updated += $updated;
+ $this->processed++;
+ if( $this->processed % 100 != 0 ) {
+ return;
+ }
+ $portion = $this->processed / $this->count;
+ $updateRate = $this->updated / $this->processed;
+
+ $now = wfTime();
+ $delta = $now - $this->startTime;
+ $estimatedTotalTime = $delta / $portion;
+ $eta = $this->startTime + $estimatedTotalTime;
+
+ printf( "%s: %6.2f%% done on %s; ETA %s [%d/%d] %.2f/sec <%.2f%% updated>\n",
+ wfTimestamp( TS_DB, intval( $now ) ),
+ $portion * 100.0,
+ $this->table,
+ wfTimestamp( TS_DB, intval( $eta ) ),
+ $this->processed,
+ $this->count,
+ $this->processed / $delta,
+ $updateRate * 100.0 );
+ flush();
+ }
+
+ function runTable( $table, $where, $callback ) {
+ $fname = 'CapsCleanup::runTable';
+
+ $count = $this->dbw->selectField( $table, 'count(*)', '', $fname );
+ $this->init( $count, 'page' );
+ $this->log( "Processing $table..." );
+
+ $tableName = $this->dbr->tableName( $table );
+ $sql = "SELECT * FROM $tableName $where";
+ $result = $this->dbr->query( $sql, $fname );
+
+ while( $row = $this->dbr->fetchObject( $result ) ) {
+ $updated = call_user_func( $callback, $row );
+ }
+ $this->log( "Finished $table... $this->updated of $this->processed rows updated" );
+ $this->dbr->freeResult( $result );
+ }
+
+ function processPage( $row ) {
+ global $wgContLang;
+
+ $current = Title::makeTitle( $row->page_namespace, $row->page_title );
+ $display = $current->getPrefixedText();
+ $upper = $row->page_title;
+ $lower = $wgContLang->lcfirst( $row->page_title );
+ if( $upper == $lower ) {
+ $this->log( "\"$display\" already lowercase." );
+ return $this->progress( 0 );
+ }
+
+ $target = Title::makeTitle( $row->page_namespace, $lower );
+ $targetDisplay = $target->getPrefixedText();
+ if( $target->exists() ) {
+ $this->log( "\"$display\" skipped; \"$targetDisplay\" already exists" );
+ return $this->progress( 0 );
+ }
+
+ if( $this->dryrun ) {
+ $this->log( "\"$display\" -> \"$targetDisplay\": DRY RUN, NOT MOVED" );
+ $ok = true;
+ } else {
+ $ok = $current->moveTo( $target, false, 'Converting page titles to lowercase' );
+ $this->log( "\"$display\" -> \"$targetDisplay\": $ok" );
+ }
+ if( $ok === true ) {
+ $this->progress( 1 );
+
+ if( $row->page_namespace == $this->namespace ) {
+ $talk = $target->getTalkPage();
+ $xrow = $row;
+ $row->page_namespace = $talk->getNamespace();
+ if( $talk->exists() ) {
+ return $this->processPage( $row );
+ }
+ }
+ } else {
+ $this->progress( 0 );
+ }
+ }
+
+}
+
+$wgUser->setName( 'Conversion script' );
+$ns = isset( $options['namespace'] ) ? $options['namespace'] : 0;
+$caps = new CapsCleanup( isset( $options['dry-run'] ), $ns );
+$caps->cleanup();
+
+?>
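
The progress() report above projects completion by simple proportional scaling: if 200 of 1000 rows took 50 seconds, the estimated total is 50 / 0.2 = 250 seconds, and the ETA is the start time plus that estimate. Typical invocations (the script also reads a --namespace option, although the header only documents --dry-run):

    php cleanupCaps.php --dry-run     # report planned moves without making them
    php cleanupCaps.php --namespace=2 # convert titles in the User: namespace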
diff --git a/maintenance/cleanupDupes.inc b/maintenance/cleanupDupes.inc
new file mode 100644
index 00000000..18daab08
--- /dev/null
+++ b/maintenance/cleanupDupes.inc
@@ -0,0 +1,131 @@
+<?php
+# Copyright (C) 2004 Brion Vibber <brion@pobox.com>
+# http://www.mediawiki.org/
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# http://www.gnu.org/copyleft/gpl.html
+
+/**
+ * If on the old non-unique indexes, check the cur table for duplicate
+ * entries and remove them...
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+function fixDupes( $fixthem = false) {
+ $dbw =& wfGetDB( DB_MASTER );
+ $cur = $dbw->tableName( 'cur' );
+ $old = $dbw->tableName( 'old' );
+ $dbw->query( "LOCK TABLES $cur WRITE, $old WRITE" );
+ echo "Checking for duplicate cur table entries... (this may take a while on a large wiki)\n";
+ $res = $dbw->query( <<<END
+SELECT cur_namespace,cur_title,count(*) as c,min(cur_id) as id
+ FROM $cur
+ GROUP BY cur_namespace,cur_title
+HAVING c > 1
+END
+ );
+ $n = $dbw->numRows( $res );
+ echo "Found $n titles with duplicate entries.\n";
+ if( $n > 0 ) {
+ if( $fixthem ) {
+ echo "Correcting...\n";
+ } else {
+ echo "Just a demo...\n";
+ }
+ while( $row = $dbw->fetchObject( $res ) ) {
+ $ns = intval( $row->cur_namespace );
+ $title = $dbw->addQuotes( $row->cur_title );
+
+ # Get the first responding ID; that'll be the one we keep.
+ $id = $dbw->selectField( 'cur', 'cur_id', array(
+ 'cur_namespace' => $row->cur_namespace,
+ 'cur_title' => $row->cur_title ) );
+
+ echo "$ns:$row->cur_title (canonical ID $id)\n";
+ if( $id != $row->id ) {
+ echo " ** minimum ID $row->id; ";
+ $timeMin = $dbw->selectField( 'cur', 'cur_timestamp', array(
+ 'cur_id' => $row->id ) );
+ $timeFirst = $dbw->selectField( 'cur', 'cur_timestamp', array(
+ 'cur_id' => $id ) );
+ if( $timeMin == $timeFirst ) {
+ echo "timestamps match at $timeFirst; ok\n";
+ } else {
+ echo "timestamps don't match! min: $timeMin, first: $timeFirst; ";
+ if( $timeMin > $timeFirst ) {
+ $id = $row->id;
+ echo "keeping minimum: $id\n";
+ } else {
+ echo "keeping first: $id\n";
+ }
+ }
+ }
+
+ if( $fixthem ) {
+ $dbw->query( <<<END
+INSERT
+ INTO $old
+ (old_namespace, old_title, old_text,
+ old_comment, old_user, old_user_text,
+ old_timestamp, old_minor_edit, old_flags,
+ inverse_timestamp)
+SELECT cur_namespace, cur_title, cur_text,
+ cur_comment, cur_user, cur_user_text,
+ cur_timestamp, cur_minor_edit, '',
+ inverse_timestamp
+ FROM $cur
+ WHERE cur_namespace=$ns
+ AND cur_title=$title
+ AND cur_id != $id
+END
+ );
+ $dbw->query( <<<END
+DELETE
+ FROM $cur
+ WHERE cur_namespace=$ns
+ AND cur_title=$title
+ AND cur_id != $id
+END
+ );
+ }
+ }
+ }
+ $dbw->query( 'UNLOCK TABLES' );
+ if( $fixthem ) {
+ echo "Done.\n";
+ } else {
+ echo "Run again with --fix option to delete the duplicates.\n";
+ }
+}
+
+function checkDupes( $fixthem = false, $indexonly = false ) {
+ global $wgDBname;
+ $dbw =& wfGetDB( DB_MASTER );
+ if( $dbw->indexExists( 'cur', 'name_title' ) &&
+ $dbw->indexUnique( 'cur', 'name_title' ) ) {
+ echo "$wgDBname: cur table has the current unique index; no duplicate entries.\n";
+ } elseif( $dbw->indexExists( 'cur', 'name_title_dup_prevention' ) ) {
+ echo "$wgDBname: cur table has a temporary name_title_dup_prevention unique index; no duplicate entries.\n";
+ } else {
+ echo "$wgDBname: cur table has the old non-unique index and may have duplicate entries.\n";
+ if( !$indexonly ) {
+ fixDupes( $fixthem );
+ }
+ }
+}
+
+?> \ No newline at end of file
diff --git a/maintenance/cleanupDupes.php b/maintenance/cleanupDupes.php
new file mode 100644
index 00000000..3aea2304
--- /dev/null
+++ b/maintenance/cleanupDupes.php
@@ -0,0 +1,37 @@
+<?php
+# Copyright (C) 2004 Brion Vibber <brion@pobox.com>
+# http://www.mediawiki.org/
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# http://www.gnu.org/copyleft/gpl.html
+
+/**
+ * If on the old non-unique indexes, check the cur table for duplicate
+ * entries and remove them...
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+$options = array( 'fix', 'index' );
+
+/** */
+require_once( 'commandLine.inc' );
+require_once( 'cleanupDupes.inc' );
+$wgTitle = Title::newFromText( 'Dupe cur entry cleanup script' );
+
+checkDupes( isset( $options['fix'] ), isset( $options['index'] ) );
+
+?>
diff --git a/maintenance/cleanupSpam.php b/maintenance/cleanupSpam.php
new file mode 100644
index 00000000..65d6bc4d
--- /dev/null
+++ b/maintenance/cleanupSpam.php
@@ -0,0 +1,112 @@
+<?php
+
+require_once( 'commandLine.inc' );
+require_once( "$IP/includes/LinkFilter.php" );
+
+function cleanupArticle( $id, $domain ) {
+ $title = Title::newFromID( $id );
+ if ( !$title ) {
+ print "Internal error: no page for ID $id\n";
+ return;
+ }
+
+ print $title->getPrefixedDBkey() . " ...";
+ $rev = Revision::newFromTitle( $title );
+ $reverted = false;
+ $revId = $rev->getId();
+ $currentRevId = $revId;
+ $regex = LinkFilter::makeRegex( $domain );
+
+ while ( $rev && preg_match( $regex, $rev->getText() ) ) {
+ # Revision::getPrevious can't be used in this way before MW 1.6 (Revision.php 1.26)
+ #$rev = $rev->getPrevious();
+ $revId = $title->getPreviousRevisionID( $revId );
+ if ( $revId ) {
+ $rev = Revision::newFromTitle( $title, $revId );
+ } else {
+ $rev = false;
+ }
+ }
+ if ( $revId == $currentRevId ) {
+ // The regex didn't match the current article text
+ // This happens e.g. when a link comes from a template rather than the page itself
+ print "False match\n";
+ } else {
+ $dbw =& wfGetDB( DB_MASTER );
+ $dbw->immediateBegin();
+ if ( !$rev ) {
+ // Didn't find a non-spammy revision, blank the page
+ print "blanking\n";
+ $article = new Article( $title );
+ $article->updateArticle( '', wfMsg( 'spam_blanking', $domain ),
+ false, false );
+
+ } else {
+ // Revert to this revision
+ print "reverting\n";
+ $article = new Article( $title );
+ $article->updateArticle( $rev->getText(), wfMsg( 'spam_reverting', $domain ), false, false );
+ }
+ $dbw->immediateCommit();
+ wfDoUpdates();
+ }
+}
+//------------------------------------------------------------------------------
+
+
+
+
+$username = wfMsg( 'spambot_username' );
+$fname = $username;
+$wgUser = User::newFromName( $username );
+// Create the user if necessary
+if ( !$wgUser->getID() ) {
+ $wgUser->addToDatabase();
+}
+
+if ( !isset( $args[0] ) ) {
+ print "Usage: php cleanupSpam.php <hostname>\n";
+ exit(1);
+}
+$spec = $args[0];
+$like = LinkFilter::makeLike( $spec );
+if ( !$like ) {
+ print "Not a valid hostname specification: $spec\n";
+ exit(1);
+}
+
+$dbr =& wfGetDB( DB_SLAVE );
+
+if ( isset( $options['all'] ) ) {
+ // Clean up spam on all wikis
+ $dbr =& wfGetDB( DB_SLAVE );
+ print "Finding spam on " . count($wgLocalDatabases) . " wikis\n";
+ $found = false;
+ foreach ( $wgLocalDatabases as $db ) {
+ $count = $dbr->selectField( "`$db`.externallinks", 'COUNT(*)',
+ array( 'el_index LIKE ' . $dbr->addQuotes( $like ) ), $fname );
+ if ( $count ) {
+ $found = true;
+ passthru( "php cleanupSpam.php $db $spec | sed s/^/$db: /" );
+ }
+ }
+ if ( $found ) {
+ print "All done\n";
+ } else {
+ print "None found\n";
+ }
+} else {
+ // Clean up spam on this wiki
+ $res = $dbr->select( 'externallinks', array( 'DISTINCT el_from' ),
+ array( 'el_index LIKE ' . $dbr->addQuotes( $like ) ), $fname );
+ $count = $dbr->numRows( $res );
+ print "Found $count articles containing $spec\n";
+ while ( $row = $dbr->fetchObject( $res ) ) {
+ cleanupArticle( $row->el_from, $spec );
+ }
+ if ( $count ) {
+ print "Done\n";
+ }
+}
+
+?>
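
A sketch of typical runs; the hostname argument is passed through LinkFilter::makeLike() to build the el_index pattern, and --all repeats the scan for every database in $wgLocalDatabases:

    php cleanupSpam.php example.com       # revert or blank spam pages on this wiki
    php cleanupSpam.php --all example.com # scan every wiki in the farm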
diff --git a/maintenance/cleanupTitles.php b/maintenance/cleanupTitles.php
new file mode 100644
index 00000000..930072de
--- /dev/null
+++ b/maintenance/cleanupTitles.php
@@ -0,0 +1,210 @@
+<?php
+/*
+ * Script to clean up broken, unparseable titles.
+ *
+ * Usage: php cleanupTitles.php [--dry-run]
+ * Options:
+ * --dry-run don't actually try moving them
+ *
+ * Copyright (C) 2005 Brion Vibber <brion@pobox.com>
+ * http://www.mediawiki.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * @author Brion Vibber <brion at pobox.com>
+ * @package MediaWiki
+ * @subpackage maintenance
+ */
+
+$options = array( 'dry-run' );
+
+require_once( 'commandLine.inc' );
+require_once( 'FiveUpgrade.inc' );
+
+class TitleCleanup extends FiveUpgrade {
+ function TitleCleanup( $dryrun = false ) {
+ parent::FiveUpgrade();
+
+ $this->maxLag = 10; # if slaves are lagged more than 10 secs, wait
+ $this->dryrun = $dryrun;
+ }
+
+ function cleanup() {
+ $this->runTable( 'page',
+ '', //'WHERE page_namespace=0',
+ array( &$this, 'processPage' ) );
+ }
+
+ function init( $count, $table ) {
+ $this->processed = 0;
+ $this->updated = 0;
+ $this->count = $count;
+ $this->startTime = wfTime();
+ $this->table = $table;
+ }
+
+ function progress( $updated ) {
+ $this->updated += $updated;
+ $this->processed++;
+ if( $this->processed % 100 != 0 ) {
+ return;
+ }
+ $portion = $this->processed / $this->count;
+ $updateRate = $this->updated / $this->processed;
+
+ $now = wfTime();
+ $delta = $now - $this->startTime;
+ $estimatedTotalTime = $delta / $portion;
+ $eta = $this->startTime + $estimatedTotalTime;
+
+ global $wgDBname;
+ printf( "%s %s: %6.2f%% done on %s; ETA %s [%d/%d] %.2f/sec <%.2f%% updated>\n",
+ $wgDBname,
+ wfTimestamp( TS_DB, intval( $now ) ),
+ $portion * 100.0,
+ $this->table,
+ wfTimestamp( TS_DB, intval( $eta ) ),
+ $this->processed,
+ $this->count,
+ $this->processed / $delta,
+ $updateRate * 100.0 );
+ flush();
+ }
+
+ function runTable( $table, $where, $callback ) {
+ $fname = 'TitleCleanup::runTable';
+
+ $count = $this->dbw->selectField( $table, 'count(*)', '', $fname );
+ $this->init( $count, 'page' );
+ $this->log( "Processing $table..." );
+
+ $tableName = $this->dbr->tableName( $table );
+ $sql = "SELECT * FROM $tableName $where";
+ $result = $this->dbr->query( $sql, $fname );
+
+ while( $row = $this->dbr->fetchObject( $result ) ) {
+ $updated = call_user_func( $callback, $row );
+ }
+ $this->log( "Finished $table... $this->updated of $this->processed rows updated" );
+ $this->dbr->freeResult( $result );
+ }
+
+ function processPage( $row ) {
+ global $wgContLang;
+
+ $current = Title::makeTitle( $row->page_namespace, $row->page_title );
+ $display = $current->getPrefixedText();
+
+ $verified = UtfNormal::cleanUp( $display );
+
+ $title = Title::newFromText( $verified );
+
+ if( is_null( $title ) ) {
+ $this->log( "page $row->page_id ($display) is illegal." );
+ $this->moveIllegalPage( $row );
+ return $this->progress( 1 );
+ }
+
+ if( !$title->equals( $current ) ) {
+ $this->log( "page $row->page_id ($display) doesn't match self." );
+ $this->moveInconsistentPage( $row, $title );
+ return $this->progress( 1 );
+ }
+
+ $this->progress( 0 );
+ }
+
+ function moveIllegalPage( $row ) {
+ $legal = 'A-Za-z0-9_/\\\\-';
+ $legalized = preg_replace_callback( "!([^$legal])!",
+ array( &$this, 'hexChar' ),
+ $row->page_title );
+ if( $legalized == '.' ) $legalized = '(dot)';
+ if( $legalized == '_' ) $legalized = '(space)';
+ $legalized = 'Broken/' . $legalized;
+
+ $title = Title::newFromText( $legalized );
+ if( is_null( $title ) ) {
+ $clean = 'Broken/id:' . $row->page_id;
+ $this->log( "Couldn't legalize; form '$legalized' still invalid; using '$clean'" );
+ $title = Title::newFromText( $clean );
+ } elseif( $title->exists() ) {
+ $clean = 'Broken/id:' . $row->page_id;
+ $this->log( "Legalized for '$legalized' exists; using '$clean'" );
+ $title = Title::newFromText( $clean );
+ }
+
+ $dest = $title->getDbKey();
+ if( $this->dryrun ) {
+ $this->log( "DRY RUN: would rename $row->page_id ($row->page_namespace,'$row->page_title') to ($row->page_namespace,'$dest')" );
+ } else {
+ $this->log( "renaming $row->page_id ($row->page_namespace,'$row->page_title') to ($row->page_namespace,'$dest')" );
+ $dbw =& wfGetDB( DB_MASTER );
+ $dbw->update( 'page',
+ array( 'page_title' => $dest ),
+ array( 'page_id' => $row->page_id ),
+ 'cleanupTitles::moveIllegalPage' );
+ }
+ }
+
+ function moveInconsistentPage( $row, $title ) {
+ if( $title->exists() || $title->getInterwiki() ) {
+ if( $title->getInterwiki() ) {
+ $prior = $title->getPrefixedDbKey();
+ } else {
+ $prior = $title->getDbKey();
+ }
+ $clean = 'Broken/' . $prior;
+ $verified = Title::makeTitleSafe( $row->page_namespace, $clean );
+ if( $verified->exists() ) {
+ $blah = "Broken/id:" . $row->page_id;
+ $this->log( "Couldn't legalize; form '$clean' exists; using '$blah'" );
+ $verified = Title::makeTitleSafe( $row->page_namespace, $blah );
+ }
+ $title = $verified;
+ }
+ if( is_null( $title ) ) {
+ wfDie( "Something awry; empty title.\n" );
+ }
+ $ns = $title->getNamespace();
+ $dest = $title->getDbKey();
+ if( $this->dryrun ) {
+ $this->log( "DRY RUN: would rename $row->page_id ($row->page_namespace,'$row->page_title') to ($row->page_namespace,'$dest')" );
+ } else {
+ $this->log( "renaming $row->page_id ($row->page_namespace,'$row->page_title') to ($ns,'$dest')" );
+ $dbw =& wfGetDB( DB_MASTER );
+ $dbw->update( 'page',
+ array(
+ 'page_namespace' => $ns,
+ 'page_title' => $dest
+ ),
+ array( 'page_id' => $row->page_id ),
+ 'cleanupTitles::moveInconsistentPage' );
+ $linkCache =& LinkCache::singleton();
+ $linkCache->clear();
+ }
+ }
+
+ function hexChar( $matches ) {
+ return sprintf( "\\x%02x", ord( $matches[1] ) );
+ }
+}
+
+$wgUser->setName( 'Conversion script' );
+$caps = new TitleCleanup( isset( $options['dry-run'] ) );
+$caps->cleanup();
+
+?>
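
The hexChar() escaping above maps every illegal byte to a \xNN sequence, so a page whose raw title is "A|B" would be parked at "Broken/A\x7cB" (0x7c is the code of '|'); names that still fail to parse, or that collide with an existing page, fall back to "Broken/id:<page_id>". A dry run previews the renames without touching the page table:

    php cleanupTitles.php --dry-run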
diff --git a/maintenance/cleanupWatchlist.php b/maintenance/cleanupWatchlist.php
new file mode 100644
index 00000000..d2925db3
--- /dev/null
+++ b/maintenance/cleanupWatchlist.php
@@ -0,0 +1,141 @@
+<?php
+/*
+ * Script to remove broken, unparseable titles in the Watchlist.
+ *
+ * Usage: php cleanupWatchlist.php [--fix]
+ * Options:
+ * --fix Actually remove entries; without will only report.
+ *
+ * Copyright (C) 2005,2006 Brion Vibber <brion@pobox.com>
+ * http://www.mediawiki.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * @author Brion Vibber <brion at pobox.com>
+ * @package MediaWiki
+ * @subpackage maintenance
+ */
+
+$options = array( 'fix' );
+
+require_once( 'commandLine.inc' );
+require_once( 'FiveUpgrade.inc' );
+
+class WatchlistCleanup extends FiveUpgrade {
+ function WatchlistCleanup( $dryrun = false ) {
+ parent::FiveUpgrade();
+
+ $this->maxLag = 10; # if slaves are lagged more than 10 secs, wait
+ $this->dryrun = $dryrun;
+ }
+
+ function cleanup() {
+ $this->runTable( 'watchlist',
+ '',
+ array( &$this, 'processEntry' ) );
+ }
+
+ function init( $count, $table ) {
+ $this->processed = 0;
+ $this->updated = 0;
+ $this->count = $count;
+ $this->startTime = wfTime();
+ $this->table = $table;
+ }
+
+ function progress( $updated ) {
+ $this->updated += $updated;
+ $this->processed++;
+ if( $this->processed % 100 != 0 ) {
+ return;
+ }
+ $portion = $this->processed / $this->count;
+ $updateRate = $this->updated / $this->processed;
+
+ $now = wfTime();
+ $delta = $now - $this->startTime;
+ $estimatedTotalTime = $delta / $portion;
+ $eta = $this->startTime + $estimatedTotalTime;
+
+ global $wgDBname;
+ printf( "%s %s: %6.2f%% done on %s; ETA %s [%d/%d] %.2f/sec <%.2f%% updated>\n",
+ $wgDBname,
+ wfTimestamp( TS_DB, intval( $now ) ),
+ $portion * 100.0,
+ $this->table,
+ wfTimestamp( TS_DB, intval( $eta ) ),
+ $this->processed,
+ $this->count,
+ $this->processed / $delta,
+ $updateRate * 100.0 );
+ flush();
+ }
+
+ function runTable( $table, $where, $callback ) {
+ $fname = 'WatchlistCleanup::runTable';
+
+ $count = $this->dbw->selectField( $table, 'count(*)', '', $fname );
+ $this->init( $count, 'watchlist' );
+ $this->log( "Processing $table..." );
+
+ $tableName = $this->dbr->tableName( $table );
+ $sql = "SELECT * FROM $tableName $where";
+ $result = $this->dbr->query( $sql, $fname );
+
+ while( $row = $this->dbr->fetchObject( $result ) ) {
+ $updated = call_user_func( $callback, $row );
+ }
+ $this->log( "Finished $table... $this->updated of $this->processed rows updated" );
+ $this->dbr->freeResult( $result );
+ }
+
+ function processEntry( $row ) {
+ global $wgContLang;
+
+ $current = Title::makeTitle( $row->wl_namespace, $row->wl_title );
+ $display = $current->getPrefixedText();
+
+ $verified = UtfNormal::cleanUp( $display );
+
+ $title = Title::newFromText( $verified );
+
+ if( $row->wl_user == 0 || is_null( $title ) || !$title->equals( $current ) ) {
+ $this->log( "invalid watch by {$row->wl_user} for ({$row->wl_namespace}, \"{$row->wl_title}\")" );
+ $this->removeWatch( $row );
+ return $this->progress( 1 );
+ }
+
+ $this->progress( 0 );
+ }
+
+ function removeWatch( $row ) {
+ if( !$this->dryrun) {
+ $dbw =& wfGetDB( DB_MASTER );
+ $dbw->delete( 'watchlist', array(
+ 'wl_user' => $row->wl_user,
+ 'wl_namespace' => $row->wl_namespace,
+ 'wl_title' => $row->wl_title ),
+ 'WatchlistCleanup::removeWatch' );
+ $this->log( '- removed' );
+ }
+ }
+}
+
+$wgUser->setName( 'Conversion script' );
+$caps = new WatchlistCleanup( !isset( $options['fix'] ) );
+$caps->cleanup();
+
+?>
diff --git a/maintenance/clear_interwiki_cache.php b/maintenance/clear_interwiki_cache.php
new file mode 100644
index 00000000..97869728
--- /dev/null
+++ b/maintenance/clear_interwiki_cache.php
@@ -0,0 +1,26 @@
+<?php
+/**
+ * This script is used to clear the interwiki links for ALL languages in
+ * memcached.
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+require_once('commandLine.inc');
+
+$dbr =& wfGetDB( DB_SLAVE );
+$res = $dbr->select( 'interwiki', array( 'iw_prefix' ), false );
+$prefixes = array();
+while ( $row = $dbr->fetchObject( $res ) ) {
+ $prefixes[] = $row->iw_prefix;
+}
+
+foreach ( $wgLocalDatabases as $db ) {
+ print "$db ";
+ foreach ( $prefixes as $prefix ) {
+ $wgMemc->delete("$db:interwiki:$prefix");
+ }
+}
+print "\n";
+?>
diff --git a/maintenance/clear_stats.php b/maintenance/clear_stats.php
new file mode 100644
index 00000000..00cfd0ce
--- /dev/null
+++ b/maintenance/clear_stats.php
@@ -0,0 +1,31 @@
+<?php
+require_once('commandLine.inc');
+
+foreach ( $wgLocalDatabases as $db ) {
+ noisyDelete("$db:stats:request_with_session");
+ noisyDelete("$db:stats:request_without_session");
+ noisyDelete("$db:stats:pcache_hit");
+ noisyDelete("$db:stats:pcache_miss_invalid");
+ noisyDelete("$db:stats:pcache_miss_expired");
+ noisyDelete("$db:stats:pcache_miss_absent");
+ noisyDelete("$db:stats:pcache_miss_stub");
+ noisyDelete("$db:stats:image_cache_hit");
+ noisyDelete("$db:stats:image_cache_miss");
+ noisyDelete("$db:stats:image_cache_update");
+ noisyDelete("$db:stats:diff_cache_hit");
+ noisyDelete("$db:stats:diff_cache_miss");
+ noisyDelete("$db:stats:diff_uncacheable");
+}
+
+function noisyDelete( $key ) {
+ global $wgMemc;
+ /*
+ print "$key ";
+ if ( $wgMemc->delete($key) ) {
+ print "deleted\n";
+ } else {
+ print "FAILED\n";
+ }*/
+ $wgMemc->delete($key);
+}
+?>
diff --git a/maintenance/commandLine.inc b/maintenance/commandLine.inc
new file mode 100644
index 00000000..2bb5389e
--- /dev/null
+++ b/maintenance/commandLine.inc
@@ -0,0 +1,232 @@
+<?php
+/**
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+$wgRequestTime = microtime(true);
+
+/** */
+# Abort if called from a web server
+if ( isset( $_SERVER ) && array_key_exists( 'REQUEST_METHOD', $_SERVER ) ) {
+ print "This script must be run from the command line\n";
+ exit();
+}
+
+define('MEDIAWIKI',true);
+
+# Process command line arguments
+# $options becomes an array with keys set to the option names
+# $optionsWithArgs is an array of GNU-style options that take an argument. The arguments are returned
+# in the values of $options.
+# $args becomes a zero-based array containing the non-option arguments
+
+if ( !isset( $optionsWithArgs ) ) {
+ $optionsWithArgs = array();
+}
+$optionsWithArgs[] = 'conf'; # For specifying the location of LocalSettings.php
+
+$self = array_shift( $argv );
+$self = __FILE__;
+$IP = realpath( dirname( $self ) . '/..' );
+#chdir( $IP );
+
+$options = array();
+$args = array();
+
+
+# Parse arguments
+
+for( $arg = reset( $argv ); $arg !== false; $arg = next( $argv ) ) {
+ if ( $arg == '--' ) {
+ # End of options, remainder should be considered arguments
+ $arg = next( $argv );
+ while( $arg !== false ) {
+ $args[] = $arg;
+ $arg = next( $argv );
+ }
+ break;
+ } elseif ( substr( $arg, 0, 2 ) == '--' ) {
+ # Long options
+ $option = substr( $arg, 2 );
+ if ( in_array( $option, $optionsWithArgs ) ) {
+ $param = next( $argv );
+ if ( $param === false ) {
+ echo "$arg needs an value after it\n";
+ die( -1 );
+ }
+ $options[$option] = $param;
+ } else {
+ $bits = explode( '=', $option, 2 );
+ if( count( $bits ) > 1 ) {
+ $option = $bits[0];
+ $param = $bits[1];
+ } else {
+ $param = 1;
+ }
+ $options[$option] = $param;
+ }
+ } elseif ( substr( $arg, 0, 1 ) == '-' ) {
+ # Short options
+ for ( $p=1; $p<strlen( $arg ); $p++ ) {
+ $option = $arg{$p};
+ if ( in_array( $option, $optionsWithArgs ) ) {
+ $param = next( $argv );
+ if ( $param === false ) {
+ echo "$arg needs an value after it\n";
+ die( -1 );
+ }
+ $options[$option] = $param;
+ } else {
+ $options[$option] = 1;
+ }
+ }
+ } else {
+ $args[] = $arg;
+ }
+}
+
+
+# General initialisation
+
+$wgCommandLineMode = true;
+# Turn off output buffering if it's on
+@ob_end_flush();
+$sep = PATH_SEPARATOR;
+
+if (!isset( $wgUseNormalUser ) ) {
+ $wgUseNormalUser = false;
+}
+
+if ( file_exists( '/home/wikipedia/common/langlist' ) ) {
+ $wgWikiFarm = true;
+ $cluster = trim( file_get_contents( '/etc/cluster' ) );
+ require_once( "$IP/includes/SiteConfiguration.php" );
+
+ # Get $wgConf
+ require( "$IP/wgConf.php" );
+
+ if ( empty( $wgNoDBParam ) ) {
+ # Check if we were passed a db name
+ $db = array_shift( $args );
+ list( $site, $lang ) = $wgConf->siteFromDB( $db );
+
+ # If not, work out the language and site the old way
+ if ( is_null( $site ) || is_null( $lang ) ) {
+ if ( !$db ) {
+ $lang = 'aa';
+ } else {
+ $lang = $db;
+ }
+ if ( isset( $args[0] ) ) {
+ $site = array_shift( $args );
+ } else {
+ $site = 'wikipedia';
+ }
+ }
+ } else {
+ $lang = 'aa';
+ $site = 'wikipedia';
+ }
+
+ # This is for the IRC scripts, which now run as the apache user
+ # The apache user doesn't have access to the wikiadmin_pass command
+ if ( $_ENV['USER'] == 'apache' ) {
+ $wgUseNormalUser = true;
+ }
+
+ putenv( 'wikilang='.$lang);
+
+ $DP = $IP;
+ ini_set( 'include_path', ".:$IP:$IP/includes:$IP/languages:$IP/maintenance" );
+
+ require_once( $IP.'/includes/ProfilerStub.php' );
+ require_once( $IP.'/includes/Defines.php' );
+ require_once( $IP.'/CommonSettings.php' );
+
+ $bin = '/home/wikipedia/bin';
+ if ( $wgUseRootUser ) {
+ $wgDBuser = $wgDBadminuser = 'root';
+ $wgDBpassword = $wgDBadminpassword = trim(`$bin/mysql_root_pass`);
+ } elseif ( !$wgUseNormalUser ) {
+ $wgDBuser = $wgDBadminuser = 'wikiadmin';
+ $wgDBpassword = $wgDBadminpassword = trim(`$bin/wikiadmin_pass`);
+ }
+} else {
+ $wgWikiFarm = false;
+ if ( isset( $options['conf'] ) ) {
+ $settingsFile = $options['conf'];
+ } else {
+ $settingsFile = "$IP/LocalSettings.php";
+ }
+
+ if ( ! is_readable( $settingsFile ) ) {
+ print "A copy of your installation's LocalSettings.php\n" .
+ "must exist in the source directory.\n";
+ exit( 1 );
+ }
+ $wgCommandLineMode = true;
+ $DP = $IP;
+ require_once( $IP.'/includes/ProfilerStub.php' );
+ require_once( $IP.'/includes/Defines.php' );
+ require_once( $settingsFile );
+ ini_set( 'include_path', ".$sep$IP$sep$IP/includes$sep$IP/languages$sep$IP/maintenance" );
+
+ if ( is_readable( $IP.'/AdminSettings.php' ) ) {
+ require_once( $IP.'/AdminSettings.php' );
+ }
+}
+
+# Turn off output buffering again, it might have been turned on in the settings files
+@ob_end_flush();
+# Same with these
+$wgCommandLineMode = true;
+
+if ( empty( $wgUseNormalUser ) && isset( $wgDBadminuser ) ) {
+ $wgDBuser = $wgDBadminuser;
+ $wgDBpassword = $wgDBadminpassword;
+
+ if( $wgDBservers ) {
+ foreach ( $wgDBservers as $i => $server ) {
+ $wgDBservers[$i]['user'] = $wgDBuser;
+ $wgDBservers[$i]['password'] = $wgDBpassword;
+ }
+ }
+}
+
+if ( defined( 'MW_CMDLINE_CALLBACK' ) ) {
+ $fn = MW_CMDLINE_CALLBACK;
+ $fn();
+}
+
+ini_set( 'memory_limit', -1 );
+
+require_once( 'Setup.php' );
+require_once( 'install-utils.inc' );
+$wgTitle = Title::newFromText( 'Command line script' );
+set_time_limit(0);
+
+// --------------------------------------------------------------------
+// Functions
+// --------------------------------------------------------------------
+
+function wfWaitForSlaves( $maxLag ) {
+ global $wgLoadBalancer;
+ if ( $maxLag ) {
+ list( $host, $lag ) = $wgLoadBalancer->getMaxLag();
+ while ( $lag > $maxLag ) {
+ $name = @gethostbyaddr( $host );
+ if ( $name !== false ) {
+ $host = $name;
+ }
+ print "Waiting for $host (lagged $lag seconds)...\n";
+ sleep($maxLag);
+ list( $host, $lag ) = $wgLoadBalancer->getMaxLag();
+ }
+ }
+}
+
+
+
+?>
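
A concrete parse, following the loop above. Given the hypothetical command line below, the parser fills $options and $args as shown: 'conf' takes a value because it is pre-seeded into $optionsWithArgs, an unknown long option defaults to the value 1, --name=value splits on '=', bundled short options set one flag per character, and everything after '--' lands in $args untouched.

    php someScript.php --conf /etc/mw/LocalSettings.php --dry-run --limit=50 -ab -- --literal file1

    # $options = array(
    #     'conf'    => '/etc/mw/LocalSettings.php',
    #     'dry-run' => 1,
    #     'limit'   => '50',
    #     'a'       => 1,
    #     'b'       => 1,
    # );
    # $args = array( '--literal', 'file1' );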
diff --git a/maintenance/convertLinks.inc b/maintenance/convertLinks.inc
new file mode 100644
index 00000000..f0d2c439
--- /dev/null
+++ b/maintenance/convertLinks.inc
@@ -0,0 +1,220 @@
+<?php
+/**
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+function convertLinks() {
+ global $wgDBtype;
+ if( $wgDBtype == 'PostgreSQL' ) {
+ print "Links table already ok on PostgreSQL.\n";
+ return;
+ }
+
+ print "Converting links table to ID-ID...\n";
+
+ global $wgLang, $wgDBserver, $wgDBadminuser, $wgDBadminpassword, $wgDBname;
+ global $noKeys, $logPerformance, $fh;
+
+ $numRows = $tuplesAdded = $numBadLinks = $curRowsRead = 0; #counters etc
+ $totalTuplesInserted = 0; # total tuples INSERTed into links_temp
+
+ $reportCurReadProgress = true; #whether or not to give progress reports while reading IDs from cur table
+ $curReadReportInterval = 1000; #number of rows between progress reports
+
+ $reportLinksConvProgress = true; #whether or not to give progress reports during conversion
+ $linksConvInsertInterval = 1000; #number of rows per INSERT
+
+ $initialRowOffset = 0;
+ #$finalRowOffset = 0; # not used yet; highest row number from links table to process
+
+ # Overwrite the old links table with the new one. If this is set to false,
+ # the new table will be left at links_temp.
+ $overwriteLinksTable = true;
+
+ # Don't create keys, and so allow duplicates in the new links table.
+ # This gives a huge speed improvement for very large links tables which are MyISAM. (What about InnoDB?)
+ $noKeys = false;
+
+
+ $logPerformance = false; # output performance data to a file
+ $perfLogFilename = "convLinksPerf.txt";
+ #--------------------------------------------------------------------
+
+ $dbw =& wfGetDB( DB_MASTER );
+ extract( $dbw->tableNames( 'cur', 'links', 'links_temp', 'links_backup' ) );
+
+ $res = $dbw->query( "SELECT l_from FROM $links LIMIT 1" );
+ if ( $dbw->fieldType( $res, 0 ) == "int" ) {
+ print "Schema already converted\n";
+ return;
+ }
+
+ $res = $dbw->query( "SELECT COUNT(*) AS count FROM $links" );
+ $row = $dbw->fetchObject($res);
+ $numRows = $row->count;
+ $dbw->freeResult( $res );
+
+ if ( $numRows == 0 ) {
+ print "Updating schema (no rows to convert)...\n";
+ createTempTable();
+ } else {
+ if ( $logPerformance ) { $fh = fopen ( $perfLogFilename, "w" ); }
+ $baseTime = $startTime = getMicroTime();
+ # Create a title -> cur_id map
+ print "Loading IDs from $cur table...\n";
+ performanceLog ( "Reading $numRows rows from cur table...\n" );
+ performanceLog ( "rows read vs seconds elapsed:\n" );
+
+ $dbw->bufferResults( false );
+ $res = $dbw->query( "SELECT cur_namespace,cur_title,cur_id FROM $cur" );
+ $ids = array();
+
+ while ( $row = $dbw->fetchObject( $res ) ) {
+ $title = $row->cur_title;
+ if ( $row->cur_namespace ) {
+ $title = $wgLang->getNsText( $row->cur_namespace ) . ":$title";
+ }
+ $ids[$title] = $row->cur_id;
+ $curRowsRead++;
+ if ($reportCurReadProgress) {
+ if (($curRowsRead % $curReadReportInterval) == 0) {
+ performanceLog( $curRowsRead . " " . (getMicroTime() - $baseTime) . "\n" );
+ print "\t$curRowsRead rows of $cur table read.\n";
+ }
+ }
+ }
+ $dbw->freeResult( $res );
+ $dbw->bufferResults( true );
+ print "Finished loading IDs.\n\n";
+ performanceLog( "Took " . (getMicroTime() - $baseTime) . " seconds to load IDs.\n\n" );
+ #--------------------------------------------------------------------
+
+ # Now, step through the links table (in chunks of $linksConvInsertInterval rows),
+ # convert, and write to the new table.
+ createTempTable();
+ performanceLog( "Resetting timer.\n\n" );
+ $baseTime = getMicroTime();
+ print "Processing $numRows rows from $links table...\n";
+ performanceLog( "Processing $numRows rows from $links table...\n" );
+ performanceLog( "rows inserted vs seconds elapsed:\n" );
+
+ for ($rowOffset = $initialRowOffset; $rowOffset < $numRows; $rowOffset += $linksConvInsertInterval) {
+ $sqlRead = "SELECT * FROM $links ";
+ $sqlRead = $dbw->limitResult($sqlRead, $linksConvInsertInterval,$rowOffset);
+ $res = $dbw->query($sqlRead);
+ if ( $noKeys ) {
+ $sqlWrite = array("INSERT INTO $links_temp (l_from,l_to) VALUES ");
+ } else {
+ $sqlWrite = array("INSERT IGNORE INTO $links_temp (l_from,l_to) VALUES ");
+ }
+
+ $tuplesAdded = 0; # no tuples added to INSERT yet
+ while ( $row = $dbw->fetchObject($res) ) {
+ $fromTitle = $row->l_from;
+ if ( array_key_exists( $fromTitle, $ids ) ) { # valid title
+ $from = $ids[$fromTitle];
+ $to = $row->l_to;
+ if ( $tuplesAdded != 0 ) {
+ $sqlWrite[] = ",";
+ }
+ $sqlWrite[] = "($from,$to)";
+ $tuplesAdded++;
+ } else { # invalid title
+ $numBadLinks++;
+ }
+ }
+ $dbw->freeResult($res);
+ #print "rowOffset: $rowOffset\ttuplesAdded: $tuplesAdded\tnumBadLinks: $numBadLinks\n";
+ if ( $tuplesAdded != 0 ) {
+ if ($reportLinksConvProgress) {
+ print "Inserting $tuplesAdded tuples into $links_temp...";
+ }
+ $dbw->query( implode("",$sqlWrite) );
+ $totalTuplesInserted += $tuplesAdded;
+ if ($reportLinksConvProgress)
+ print " done. Total $totalTuplesInserted tuples inserted.\n";
+ performanceLog( $totalTuplesInserted . " " . (getMicroTime() - $baseTime) . "\n" );
+ }
+ }
+ print "$totalTuplesInserted valid titles and $numBadLinks invalid titles were processed.\n\n";
+ performanceLog( "$totalTuplesInserted valid titles and $numBadLinks invalid titles were processed.\n" );
+ performanceLog( "Total execution time: " . (getMicroTime() - $startTime) . " seconds.\n" );
+ if ( $logPerformance ) { fclose ( $fh ); }
+ }
+ #--------------------------------------------------------------------
+
+ if ( $overwriteLinksTable ) {
+ $dbConn = Database::newFromParams( $wgDBserver, $wgDBadminuser, $wgDBadminpassword, $wgDBname );
+ if (!($dbConn->isOpen())) {
+ print "Opening connection to database failed.\n";
+ return;
+ }
+ # Check for existing links_backup, and delete it if it exists.
+ print "Dropping backup links table if it exists...";
+ $dbConn->query( "DROP TABLE IF EXISTS $links_backup", DB_MASTER);
+ print " done.\n";
+
+ # Swap in the new table, and move old links table to links_backup
+ print "Swapping tables '$links' to '$links_backup'; '$links_temp' to '$links'...";
+ $dbConn->query( "RENAME TABLE links TO $links_backup, $links_temp TO $links", DB_MASTER );
+ print " done.\n\n";
+
+ $dbConn->close();
+ print "Conversion complete. The old table remains at $links_backup;\n";
+ print "delete at your leisure.\n";
+ } else {
+ print "Conversion complete. The converted table is at $links_temp;\n";
+ print "the original links table is unchanged.\n";
+ }
+}
+
+#--------------------------------------------------------------------
+
+function createTempTable() {
+ global $wgDBserver, $wgDBadminuser, $wgDBadminpassword, $wgDBname;
+ global $noKeys;
+ $dbConn = Database::newFromParams( $wgDBserver, $wgDBadminuser, $wgDBadminpassword, $wgDBname );
+
+ if (!($dbConn->isOpen())) {
+ print "Opening connection to database failed.\n";
+ return;
+ }
+ $links_temp = $dbConn->tableName( 'links_temp' );
+
+ print "Dropping temporary links table if it exists...";
+ $dbConn->query( "DROP TABLE IF EXISTS $links_temp");
+ print " done.\n";
+
+ print "Creating temporary links table...";
+ if ( $noKeys ) {
+ $dbConn->query( "CREATE TABLE $links_temp ( " .
+ "l_from int(8) unsigned NOT NULL default '0', " .
+ "l_to int(8) unsigned NOT NULL default '0')");
+ } else {
+ $dbConn->query( "CREATE TABLE $links_temp ( " .
+ "l_from int(8) unsigned NOT NULL default '0', " .
+ "l_to int(8) unsigned NOT NULL default '0', " .
+ "UNIQUE KEY l_from(l_from,l_to), " .
+ "KEY (l_to))");
+ }
+ print " done.\n\n";
+}
+
+function performanceLog( $text ) {
+ global $logPerformance, $fh;
+ if ( $logPerformance ) {
+ fwrite( $fh, $text );
+ }
+}
+
+function getMicroTime() { # return time in seconds, with microsecond accuracy
+ list($usec, $sec) = explode(" ", microtime());
+ return ((float)$usec + (float)$sec);
+}
+
+
+
+?>
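
Each chunk of the old links table becomes one multi-row insert: the $sqlWrite pieces assembled above concatenate into a statement of this shape (IGNORE is dropped when $noKeys is set, since the keyless table cannot reject duplicates):

    INSERT IGNORE INTO links_temp (l_from,l_to) VALUES (1023,4),(1023,7),(2048,4)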
diff --git a/maintenance/convertLinks.php b/maintenance/convertLinks.php
new file mode 100644
index 00000000..5939b943
--- /dev/null
+++ b/maintenance/convertLinks.php
@@ -0,0 +1,16 @@
+<?php
+/**
+ * Convert from the old links schema (string->ID) to the new schema (ID->ID)
+ * The wiki should be put into read-only mode while this script executes
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+require_once( "commandLine.inc" );
+require_once( "convertLinks.inc" );
+
+convertLinks();
+
+?>
diff --git a/maintenance/counter.php b/maintenance/counter.php
new file mode 100644
index 00000000..d84c877d
--- /dev/null
+++ b/maintenance/counter.php
@@ -0,0 +1,5 @@
+<?php
+function print_c($last, $current) {
+ echo str_repeat( chr(8), strlen( $last ) ) . $current;
+}
+?>
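
print_c() rubs out the previously printed number with backspace characters (chr 8) before printing the new one, producing an in-place counter on an interactive terminal. A minimal caller might look like this:

    <?php
    require_once( 'counter.php' );
    $last = '';
    for ( $i = 1; $i <= 1000; $i++ ) {
        print_c( $last, $i ); // erase $last, print $i in its place
        $last = $i;
    }
    print "\n";
    ?>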
diff --git a/maintenance/createAndPromote.php b/maintenance/createAndPromote.php
new file mode 100644
index 00000000..df29c114
--- /dev/null
+++ b/maintenance/createAndPromote.php
@@ -0,0 +1,48 @@
+<?php
+
+/**
+ * Maintenance script to create an account and grant it administrator rights
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ */
+
+require_once( 'commandLine.inc' );
+
+if( count( $args ) != 2 ) {
+ echo( "Please provide a username and password for the new account.\n" );
+ die( 1 );
+}
+
+$username = $args[0];
+$password = $args[1];
+
+global $wgDBname;
+echo( "{$wgDBname}: Creating and promoting User:{$username}..." );
+
+# Validate username and check it doesn't exist
+$user = User::newFromName( $username );
+if( !is_object( $user ) ) {
+ echo( "invalid username.\n" );
+ die( 1 );
+} elseif( 0 != $user->idForName() ) {
+ echo( "account exists.\n" );
+ die( 1 );
+}
+
+# Insert the account into the database
+$user->addToDatabase();
+$user->setPassword( $password );
+$user->setToken();
+
+# Promote user
+$user->addGroup( 'sysop' );
+
+# Increment site_stats.ss_users
+$ssu = new SiteStatsUpdate( 0, 0, 0, 0, 1 );
+$ssu->doUpdate();
+
+echo( "done.\n" );
+
+?> \ No newline at end of file
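An example run, assuming the username is valid and not yet taken; the two positional arguments are the username and password:

    php createAndPromote.php NewSysop 's3cretPass'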
diff --git a/maintenance/database.sql b/maintenance/database.sql
new file mode 100644
index 00000000..dea99542
--- /dev/null
+++ b/maintenance/database.sql
@@ -0,0 +1,7 @@
+-- SQL script to create database for wiki. This is run from
+-- the installation script which replaces the variables with
+-- their values from local settings.
+--
+
+DROP DATABASE IF EXISTS `{$wgDBname}`;
+CREATE DATABASE `{$wgDBname}`;
diff --git a/maintenance/delete-idle-wiki-users.pl b/maintenance/delete-idle-wiki-users.pl
new file mode 100644
index 00000000..aef68ccd
--- /dev/null
+++ b/maintenance/delete-idle-wiki-users.pl
@@ -0,0 +1,138 @@
+#!/usr/bin/perl
+#
+# Nuke idle wiki accounts from the wiki's user database.
+#
+# Copyright (C) 2005 Ralf Baechle (ralf@linux-mips.org)
+#
+# This program is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by the
+# Free Software Foundation; either version 2 of the License, or (at your
+# option) any later version.
+#
+# THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
+# WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
+# NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+# NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
+# USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+# THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 675 Mass Ave, Cambridge, MA 02139, USA.
+#
+
+my $database = "DBI:mysql:database=wikidb;host=localhost";
+my $dbuser = "wikiuser";
+my $dbpasswd = "password";
+
+use strict;
+use DBI();
+
+my $verbose = 0;
+my $for_real = 1;
+
+sub do_db_op
+{
+ my ($dbh, $sql) = @_;
+
+ if ($verbose >= 3) {
+ print $sql . ";\n"
+ }
+
+ if ($for_real == 1) {
+ $dbh->do($sql);
+ }
+}
+
+sub undo_user
+{
+ my ($ref, $dbh, $sth, $killed);
+
+ # Connect to the database.
+ $dbh = DBI->connect($database, $dbuser, $dbpasswd, {RaiseError => 1});
+
+ $sth = $dbh->prepare("SELECT * FROM user");
+ $sth->execute();
+
+ $ref = $sth->fetchrow_hashref();
+
+ if ($sth->rows == 0) {
+ print "There is no user in this wiki.\n";
+ return;
+ }
+
+ while ($ref = $sth->fetchrow_hashref()) {
+ my ($user_id, $user_name, $cph, $oph, $edits);
+
+ $user_name = $ref->{user_name};
+ $user_id = $ref->{user_id};
+ if ($verbose >= 2) {
+ print "Annihilating user " . $user_name .
+ " has user_id " . $user_id . ".\n";
+ }
+
+ $cph = $dbh->prepare("SELECT * FROM cur where " .
+ "cur_user = $user_id" .
+ " AND " .
+ "cur_user_text = " . $dbh->quote("$user_name"));
+ $cph->execute();
+
+ $oph = $dbh->prepare("SELECT * FROM old where " .
+ "old_user = $user_id" .
+ " AND " .
+ "old_user_text = " . $dbh->quote("$user_name"));
+ $oph->execute();
+
+ $edits = $cph->rows + $oph->rows;
+
+ $cph->finish();
+ $oph->finish();
+
+ if ($edits == 0) {
+ if ($verbose >= 2) {
+ print "Keeping user " . $user_name .
+ ", user_id " . $user_id . ".\n";
+ }
+
+ do_db_op($dbh,
+ "DELETE FROM user WHERE user_name = " .
+ $dbh->quote("$user_name") .
+ " AND " .
+ "user_id = $user_id");
+
+ $killed++;
+ }
+ }
+
+ $sth->finish();
+
+ $dbh->disconnect();
+
+ if ($verbose >= 1) {
+ print "Killed " . $killed . " users\n";
+ }
+}
+
+my (@users, $user, $this, $opts);
+
+@users = ();
+$opts = 1;
+
+foreach $this (@ARGV) {
+ if ($opts == 1 && $this eq '-v') {
+ $verbose++;
+ } elsif ($opts == 1 && $this eq '--verbose') {
+ $verbose = 1;
+ } elsif ($opts == 1 && $this eq '--') {
+ $opts = 0;
+ } else {
+ push(@users, $this);
+ }
+}
+
+undo_user();
+
diff --git a/maintenance/deleteBatch.php b/maintenance/deleteBatch.php
new file mode 100644
index 00000000..697dffd7
--- /dev/null
+++ b/maintenance/deleteBatch.php
@@ -0,0 +1,85 @@
+<?php
+
+# delete a batch of pages
+# Usage: php deleteBatch.php [-u <user>] [-r <reason>] [-i <interval>] <listfile>
+# where
+# <listfile> is a file where each line contains the title of one page to
+# delete; with no file argument, titles are read from stdin.
+# <user> is the username to attribute the deletions to
+# <reason> is the deletion reason
+# <interval> is the number of seconds to sleep for after each deletion
+
+$oldCwd = getcwd();
+$optionsWithArgs = array( 'u', 'r', 'i' );
+require_once( 'commandLine.inc' );
+
+chdir( $oldCwd );
+
+# Options processing
+
+$filename = 'php://stdin';
+$user = 'Delete page script';
+$reason = '';
+$interval = 0;
+
+if ( isset( $args[0] ) ) {
+ $filename = $args[0];
+}
+if ( isset( $options['u'] ) ) {
+ $user = $options['u'];
+}
+if ( isset( $options['r'] ) ) {
+ $reason = $options['r'];
+}
+if ( isset( $options['i'] ) ) {
+ $interval = $options['i'];
+}
+
+$wgUser = User::newFromName( $user );
+
+
+# Setup complete, now start
+
+$file = fopen( $filename, 'r' );
+if ( !$file ) {
+ print "Unable to read file, exiting\n";
+ exit;
+}
+
+$dbw =& wfGetDB( DB_MASTER );
+
+for ( $linenum = 1; !feof( $file ); $linenum++ ) {
+ $line = trim( fgets( $file ) );
+ if ( $line === false ) {
+ break;
+ }
+ $page = Title::newFromText( $line );
+ if ( is_null( $page ) ) {
+ print "Invalid title '$line' on line $linenum\n";
+ continue;
+ }
+ if( !$page->exists() ) {
+ print "Skipping nonexistent page '$line'\n";
+ continue;
+ }
+
+
+ print $page->getPrefixedText();
+ $dbw->begin();
+ if( $page->getNamespace() == NS_IMAGE ) {
+ $art = new ImagePage( $page );
+ } else {
+ $art = new Article( $page );
+ }
+ $art->doDelete( $reason );
+ $dbw->immediateCommit();
+ print "\n";
+
+ if ( $interval ) {
+ sleep( $interval );
+ }
+ wfWaitForSlaves( 5 );
+}
+
+
+?>
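
A sketch of a batch run; the list file holds one title per line, and with no file argument titles are read from stdin:

    php deleteBatch.php -r 'Spam cleanup' -i 2 pages.txt
    echo 'Sandbox' | php deleteBatch.php -r 'Housekeeping'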
diff --git a/maintenance/deleteImageMemcached.php b/maintenance/deleteImageMemcached.php
new file mode 100644
index 00000000..4e17d21e
--- /dev/null
+++ b/maintenance/deleteImageMemcached.php
@@ -0,0 +1,60 @@
+<?php
+// php deleteImageMemcached.php --until "2005-09-05 00:00:00" --sleep 0 --report 10
+$optionsWithArgs = array( 'until', 'sleep', 'report' );
+
+require_once 'commandLine.inc';
+
+class DeleteImageCache {
+ var $until, $sleep, $report;
+
+ function DeleteImageCache( $until, $sleep, $report ) {
+ $this->until = $until;
+ $this->sleep = $sleep;
+ $this->report = $report;
+ }
+
+ function main() {
+ global $wgMemc, $wgDBname;
+ $fname = 'DeleteImageCache::main';
+
+ ini_set( 'display_errors', false );
+
+ $dbr =& wfGetDB( DB_SLAVE );
+
+ $res = $dbr->select( 'image',
+ array( 'img_name' ),
+ array( "img_timestamp < {$this->until}" ),
+ $fname
+ );
+
+ $i = 0;
+ $total = $this->getImageCount();
+
+ while ( $row = $dbr->fetchObject( $res ) ) {
+ if ($i % $this->report == 0)
+ printf("%s: %13s done (%s)\n", $wgDBname, "$i/$total", wfPercent( $i / $total * 100 ));
+ $md5 = md5( $row->img_name );
+ $wgMemc->delete( "$wgDBname:Image:$md5" );
+
+ if ($this->sleep != 0)
+ usleep( $this->sleep );
+
+ ++$i;
+ }
+ }
+
+ function getImageCount() {
+ $fname = 'DeleteImageCache::getImageCount';
+
+ $dbr =& wfGetDB( DB_SLAVE );
+ return $dbr->selectField( 'image', 'COUNT(*)', array(), $fname );
+ }
+}
+
+$until = preg_replace( "/[^\d]/", '', $options['until'] );
+$sleep = (int)$options['sleep'] * 1000; // --sleep is in milliseconds; usleep() wants microseconds
+$report = (int)$options['report'];
+
+$dic = new DeleteImageCache( $until, $sleep, $report );
+$dic->main();
+?>
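The cache key purged for each row is built from the wiki's database name and
the MD5 of the image name (see the main loop above); a minimal PHP sketch of
the same key construction, with illustrative values:

    <?php
    // Reconstruct the cache key this script deletes for one image.
    $wgDBname = 'wikidb';        // illustrative database name
    $imgName  = 'Example.jpg';   // illustrative image name
    $key = "$wgDBname:Image:" . md5( $imgName );
    // $wgMemc->delete( $key ) is what the main loop then performs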
diff --git a/maintenance/deleteOldRevisions.inc b/maintenance/deleteOldRevisions.inc
new file mode 100644
index 00000000..dd48028a
--- /dev/null
+++ b/maintenance/deleteOldRevisions.inc
@@ -0,0 +1,60 @@
+<?php
+
+/**
+ * Support functions for the deleteOldRevisions script
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ */
+
+require_once( 'purgeOldText.inc' );
+
+function DeleteOldRevisions( $delete = false ) {
+
+ # Data should come off the master, wrapped in a transaction
+ $dbw =& wfGetDB( DB_MASTER );
+ $dbw->begin();
+
+ $tbl_pag = $dbw->tableName( 'page' );
+ $tbl_rev = $dbw->tableName( 'revision' );
+
+	# Get "active" revisions from the page table
+	echo( "Searching for active revisions..." );
+	$cur = array();
+	$res = $dbw->query( "SELECT page_latest FROM $tbl_pag" );
+	while( $row = $dbw->fetchObject( $res ) ) {
+		$cur[] = $row->page_latest;
+	}
+ echo( "done.\n" );
+
+	# Get all revisions that aren't in this set
+	echo( "Searching for inactive revisions..." );
+	$old = array();
+	$set = implode( ', ', $cur );
+	$res = $dbw->query( "SELECT rev_id FROM $tbl_rev WHERE rev_id NOT IN ( $set )" );
+	while( $row = $dbw->fetchObject( $res ) ) {
+		$old[] = $row->rev_id;
+	}
+ echo( "done.\n" );
+
+ # Inform the user of what we're going to do
+ $count = count( $old );
+ echo( "$count old revisions found.\n" );
+
+ # Delete as appropriate
+ if( $delete && $count ) {
+ echo( "Deleting..." );
+ $set = implode( ', ', $old );
+ $dbw->query( "DELETE FROM $tbl_rev WHERE rev_id IN ( $set )" );
+ echo( "done.\n" );
+ }
+
+ # This bit's done
+ # Purge redundant text records
+ $dbw->commit();
+ if( $delete ) {
+ PurgeRedundantText( true );
+ }
+
+}
+
+?>
\ No newline at end of file
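The two scans in DeleteOldRevisions() are equivalent to a single anti-join; a
one-statement SQL sketch of the same selection (not what the script actually
executes):

    SELECT rev_id
    FROM revision
    WHERE rev_id NOT IN ( SELECT page_latest FROM page );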
diff --git a/maintenance/deleteOldRevisions.php b/maintenance/deleteOldRevisions.php
new file mode 100644
index 00000000..9695a8c5
--- /dev/null
+++ b/maintenance/deleteOldRevisions.php
@@ -0,0 +1,30 @@
+<?php
+
+/**
+ * Delete old (non-current) revisions from the database
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ */
+
+$options = array( 'delete', 'help' );
+require_once( 'commandLine.inc' );
+require_once( 'deleteOldRevisions.inc' );
+
+echo( "Delete Old Revisions\n\n" );
+
+if( @$options['help'] ) {
+ ShowUsage();
+} else {
+ DeleteOldRevisions( @$options['delete'] );
+}
+
+function ShowUsage() {
+ echo( "Deletes non-current revisions from the database.\n\n" );
+ echo( "Usage: php deleteOldRevisions.php [--delete|--help]\n\n" );
+ echo( "delete : Performs the deletion\n" );
+ echo( " help : Show this usage information\n" );
+}
+
+?>
\ No newline at end of file
diff --git a/maintenance/deleteOrphanedRevisions.inc.php b/maintenance/deleteOrphanedRevisions.inc.php
new file mode 100644
index 00000000..7cfb1c6b
--- /dev/null
+++ b/maintenance/deleteOrphanedRevisions.inc.php
@@ -0,0 +1,33 @@
+<?php
+
+/**
+ * Support functions for the deleteOrphanedRevisions maintenance script
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ */
+
+/**
+ * Delete one or more revisions from the database
+ * Do this inside a transaction
+ *
+ * @param $id Array of revision id values
+ * @param $dbw Database object (needs to be a master)
+ */
+function deleteRevisions( $id, &$dbw ) {
+ if( !is_array( $id ) )
+ $id = array( $id );
+ $dbw->delete( 'revision', array( 'rev_id' => $id ), 'deleteRevision' );
+}
+
+/**
+ * Spit out script usage information and exit
+ */
+function showUsage() {
+	echo( "Finds revisions which refer to nonexistent pages and deletes them from the database\n" );
+ echo( "USAGE: php deleteOrphanedRevisions.php [--report]\n\n" );
+ echo( " --report : Prints out a count of affected revisions but doesn't delete them\n\n" );
+}
+
+?>
\ No newline at end of file
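deleteRevisions() accepts a single id or an array of ids; a hypothetical call,
wrapped in the caller-managed transaction the comment above asks for (the ids
are illustrative):

    <?php
    $dbw =& wfGetDB( DB_MASTER );
    $dbw->immediateBegin();
    deleteRevisions( array( 12345, 12346 ), $dbw );
    $dbw->immediateCommit();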
diff --git a/maintenance/deleteOrphanedRevisions.php b/maintenance/deleteOrphanedRevisions.php
new file mode 100644
index 00000000..b4f5b517
--- /dev/null
+++ b/maintenance/deleteOrphanedRevisions.php
@@ -0,0 +1,55 @@
+<?php
+
+/**
+ * Maintenance script to delete revisions which refer to a nonexistent page
+ * Sometimes manual deletion done in a rush leaves crap in the database
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ * @todo More efficient cleanup of text records
+ */
+
+$options = array( 'report', 'help' );
+require_once( 'commandLine.inc' );
+require_once( 'deleteOrphanedRevisions.inc.php' );
+echo( "Delete Orphaned Revisions\n" );
+
+if( isset( $options['help'] ) )
+ showUsage();
+
+$report = isset( $options['report'] );
+
+$dbw =& wfGetDB( DB_MASTER );
+$dbw->immediateBegin();
+extract( $dbw->tableNames( 'page', 'revision' ) );
+
+# Find all the orphaned revisions
+echo( "Checking for orphaned revisions..." );
+$sql = "SELECT rev_id FROM {$revision} LEFT JOIN {$page} ON rev_page = page_id WHERE page_namespace IS NULL";
+$res = $dbw->query( $sql, 'deleteOrphanedRevisions' );
+
+# Stash 'em all up for deletion (if needed)
+$revisions = array();
+while( $row = $dbw->fetchObject( $res ) )
+	$revisions[] = $row->rev_id;
+$dbw->freeResult( $res );
+$count = count( $revisions );
+echo( "found {$count}.\n" );
+
+# Nothing to do?
+if( $report || $count == 0 ) {
+ $dbw->immediateCommit();
+ exit();
+}
+
+# Delete each revision
+echo( "Deleting..." );
+deleteRevisions( $revisions, $dbw );
+echo( "done.\n" );
+
+# Close the transaction and call the script to purge unused text records
+$dbw->immediateCommit();
+require_once( 'purgeOldText.inc' );
+PurgeRedundantText( true );
+
+?>
\ No newline at end of file
diff --git a/maintenance/deleteRevision.php b/maintenance/deleteRevision.php
new file mode 100644
index 00000000..e7d005b6
--- /dev/null
+++ b/maintenance/deleteRevision.php
@@ -0,0 +1,40 @@
+<?php
+require_once( 'commandLine.inc' );
+
+$dbw =& wfGetDB( DB_MASTER );
+
+if ( count( $args ) == 0 ) {
+ echo "Usage: php deleteRevision.php <revid> [<revid> ...]\n";
+ exit(1);
+}
+
+echo "Deleting revision(s) " . implode( ',', $args ) . " from $wgDBname...\n";
+
+$affected = 0;
+foreach ( $args as $revID ) {
+ $dbw->insertSelect( 'archive', array( 'page', 'revision' ),
+ array(
+ 'ar_namespace' => 'page_namespace',
+ 'ar_title' => 'page_title',
+ 'ar_comment' => 'rev_comment',
+ 'ar_user' => 'rev_user',
+ 'ar_user_text' => 'rev_user_text',
+ 'ar_timestamp' => 'rev_timestamp',
+ 'ar_minor_edit' => 'rev_minor_edit',
+ 'ar_rev_id' => 'rev_id',
+ 'ar_text_id' => 'rev_text_id',
+ ), array(
+ 'rev_id' => $revID,
+ 'page_id = rev_page'
+		), 'deleteRevision'
+ );
+ if ( !$dbw->affectedRows() ) {
+ echo "Revision $revID not found\n";
+ } else {
+ $affected += $dbw->affectedRows();
+ $dbw->delete( 'revision', array( 'rev_id' => $revID ) );
+ }
+}
+
+print "Deleted $affected revisions\n";
+?>
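The insertSelect() call above corresponds roughly to the following SQL (a
sketch; real table names carry any configured prefix, and 12345 is an
illustrative revision id):

    INSERT INTO archive
        (ar_namespace, ar_title, ar_comment, ar_user, ar_user_text,
         ar_timestamp, ar_minor_edit, ar_rev_id, ar_text_id)
    SELECT page_namespace, page_title, rev_comment, rev_user, rev_user_text,
           rev_timestamp, rev_minor_edit, rev_id, rev_text_id
    FROM page, revision
    WHERE rev_id = 12345 AND page_id = rev_page;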
diff --git a/maintenance/diffLanguage.php b/maintenance/diffLanguage.php
new file mode 100644
index 00000000..eb87b3ba
--- /dev/null
+++ b/maintenance/diffLanguage.php
@@ -0,0 +1,159 @@
+<?php
+# MediaWiki language file comparison script
+# Copyright (C) 2004 Ashar Voultoiz <thoane@altern.org> and others
+# http://www.mediawiki.org/
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# http://www.gnu.org/copyleft/gpl.html
+
+/**
+ * Usage: php DiffLanguage.php [lang [file]]
+ *
+ * lang: Enter the language code following "Language" of the LanguageXX.php you
+ * want to check. On case-sensitive filesystems (e.g. Linux) you need to match
+ * the case of the filename, i.e. Zh and not zh.
+ *
+ * file: A PHP language file you want to include to compare the MediaWiki
+ * Language{Lang}.php against (for example Special:Allmessages PHP output).
+ *
+ * The goal is to get a list of messages not yet localised in a LanguageXX.php
+ * file, using the reference language file as the baseline.
+ *
+ * The script then prints a list of wgAllMessagesXX keys that aren't localised,
+ * the percentage of messages correctly localised, and the number of messages
+ * still to be translated.
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** This script runs from the command line */
+require_once( 'parserTests.inc' );
+require_once( 'commandLine.inc' );
+
+if( isset($options['help']) ) { usage(); wfDie(); }
+
+$wgLanguageCode = ucfirstlcrest($wgLanguageCode);
+/** Language messages we will use as reference. By default 'en' */
+$referenceMessages = $wgAllMessagesEn;
+$referenceLanguage = 'En';
+$referenceFilename = 'Language'.$referenceLanguage.'.php';
+/** Language messages we will test. */
+$testMessages = array();
+$testLanguage = '';
+/** whether we use an external language file */
+$externalRef = false;
+
+# FUNCTIONS
+/** @todo more information!! */
+function usage() {
+echo 'php DiffLanguage.php [lang [file]] [--color=(yes|no|light)]'."\n";
+}
+
+/** Return a given string with first letter upper case, the rest lowercase */
+function ucfirstlcrest($string) {
+ return strtoupper(substr($string,0,1)).strtolower(substr($string,1));
+}
+
+/**
+ * Return a $wgAllmessages array shipped in MediaWiki
+ * @param string $languageCode Formatted language code
+ * @return array The MediaWiki default $wgAllMessages array requested
+ */
+function getMediawikiMessages($languageCode = 'En') {
+
+ $foo = "wgAllMessages$languageCode";
+ global $$foo, $wgSkinNamesEn;
+
+ // it might already be loaded in LocalSettings.php
+ if(!isset($$foo)) {
+ global $IP;
+ $langFile = $IP.'/languages/Language'.$languageCode.'.php';
+ if (file_exists( $langFile ) ) {
+ print "Including $langFile\n";
+ global $wgNamespaceNamesEn;
+ include($langFile);
+	} else wfDie("ERROR: The file $langFile does not exist!\n");
+ }
+ return $$foo;
+}
+
+/**
+ * Return a $wgAllmessages array from a given file. The language of the array
+ * needs to be given because we cannot detect which language it provides
+ * @param string $filename Filename of the file containing a message array
+ * @param string $languageCode Language of the external array
+ * @return array A $wgAllMessages array from an external file.
+ */
+function getExternalMessages($filename, $languageCode) {
+ print "Including external file $filename.\n";
+ include($filename);
+ $foo = "wgAllMessages$languageCode";
+ return $$foo;
+}
+
+# MAIN ENTRY
+if ( isset($args[0]) ) {
+	$lang = ucfirstlcrest($args[0]);
+
+	// optionally compare against another language file used as the reference
+	// instead of the default English language.
+	if( isset($args[1])) {
+		// we assume the external file contains an array of messages for the
+		// lang we are testing
+ $referenceMessages = getExternalMessages( $args[1], $lang );
+ $referenceLanguage = $lang;
+ $referenceFilename = $args[1];
+ $externalRef = true;
+ }
+
+	// Load data from MediaWiki
+ $testMessages = getMediawikiMessages($lang);
+ $testLanguage = $lang;
+} else {
+ usage();
+ wfDie();
+}
+
+/** parsertest is used to do differences */
+$myParserTest =& new ParserTest();
+
+# Get all references messages and check if they exist in the tested language
+$i = 0;
+
+$msg = "MW Language{$testLanguage}.php against ";
+if($externalRef) { $msg .= 'external file '; }
+else { $msg .= 'internal file '; }
+$msg .= $referenceFilename.' ('.$referenceLanguage."):\n----\n";
+echo $msg;
+
+// process messages
+foreach($referenceMessages as $index => $ref)
+{
+ // message is not localized
+ if(!(isset($testMessages[$index]))) {
+ $i++;
+ print "'$index' => \"$ref\",\n";
+	// Messages in the same language differ
+ } elseif( ($lang == $referenceLanguage) AND ($testMessages[$index] != $ref)) {
+ print "\n$index differs:\n";
+ print $myParserTest->quickDiff($testMessages[$index],$ref,'tested','reference');
+ }
+}
+
+echo "\n----\n".$msg;
+echo "$testLanguage language is complete at ".number_format((100 - $i/count($referenceMessages) * 100),2)."%\n";
+echo "$i unlocalised messages of the ".count($referenceMessages)." messages available.\n";
+?>
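ucfirstlcrest() only normalises the case of the code passed on the command
line; illustrative calls:

    <?php
    echo ucfirstlcrest( 'zh' );  // "Zh"
    echo ucfirstlcrest( 'FR' );  // "Fr"
    // so "php diffLanguage.php zh" ends up checking languages/LanguageZh.php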
diff --git a/maintenance/dtrace/counts.d b/maintenance/dtrace/counts.d
new file mode 100644
index 00000000..13725d99
--- /dev/null
+++ b/maintenance/dtrace/counts.d
@@ -0,0 +1,23 @@
+/*
+ * This software is in the public domain.
+ *
+ * $Id: counts.d 10510 2005-08-15 01:46:19Z kateturner $
+ */
+
+#pragma D option quiet
+
+self int tottime;
+BEGIN {
+ tottime = timestamp;
+}
+
+php$target:::function-entry
+{
+	@counts[copyinstr(arg0)] = count();
+}
+
+END {
+ printf("Total time: %dus\n", (timestamp - tottime) / 1000);
+ printf("# calls by function:\n");
+ printa("%-40s %@d\n", @counts);
+}
+
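Assuming a PHP build that exposes the function-entry/function-return DTrace
probes, the script would typically be attached to a running PHP process like
so (the pid is illustrative):

    dtrace -s counts.d -p <php-pid>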
diff --git a/maintenance/dtrace/tree.d b/maintenance/dtrace/tree.d
new file mode 100644
index 00000000..2f16e41d
--- /dev/null
+++ b/maintenance/dtrace/tree.d
@@ -0,0 +1,26 @@
+/*
+ * This software is in the public domain.
+ *
+ * $Id: tree.d 10510 2005-08-15 01:46:19Z kateturner $
+ */
+
+#pragma D option quiet
+
+self int indent;
+self int times[int];
+
+php$target:::function-entry
+{
+ @counts[copyinstr(arg0)] = count();
+ printf("%*s", self->indent, "");
+ printf("-> %s\n", copyinstr(arg0));
+ self->times[self->indent] = timestamp;
+ self->indent += 2;
+}
+
+php$target:::function-return
+{
+ self->indent -= 2;
+ printf("%*s", self->indent, "");
+ printf("<- %s %dus\n", copyinstr(arg0), (timestamp - self->times[self->indent]) / 1000);
+}
diff --git a/maintenance/dumpBackup.php b/maintenance/dumpBackup.php
new file mode 100644
index 00000000..1735422d
--- /dev/null
+++ b/maintenance/dumpBackup.php
@@ -0,0 +1,99 @@
+<?php
+/**
+ * Copyright (C) 2005 Brion Vibber <brion@pobox.com>
+ * http://www.mediawiki.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * @package MediaWiki
+ * @subpackage SpecialPage
+ */
+
+$originalDir = getcwd();
+
+$optionsWithArgs = array( 'pagelist', 'start', 'end' );
+
+require_once( 'commandLine.inc' );
+require_once( 'SpecialExport.php' );
+require_once( 'maintenance/backup.inc' );
+
+$dumper = new BackupDumper( $argv );
+
+if( isset( $options['quiet'] ) ) {
+ $dumper->reporting = false;
+}
+
+if ( isset( $options['pagelist'] ) ) {
+ $olddir = getcwd();
+ chdir( $originalDir );
+ $pages = file( $options['pagelist'] );
+ chdir( $olddir );
+ if ( $pages === false ) {
+ wfDie( "Unable to open file {$options['pagelist']}\n" );
+ }
+ $pages = array_map( 'trim', $pages );
+ $dumper->pages = array_filter( $pages, create_function( '$x', 'return $x !== "";' ) );
+}
+
+if( isset( $options['start'] ) ) {
+ $dumper->startId = intval( $options['start'] );
+}
+if( isset( $options['end'] ) ) {
+ $dumper->endId = intval( $options['end'] );
+}
+$dumper->skipHeader = isset( $options['skip-header'] );
+$dumper->skipFooter = isset( $options['skip-footer'] );
+
+$textMode = isset( $options['stub'] ) ? MW_EXPORT_STUB : MW_EXPORT_TEXT;
+
+if( isset( $options['full'] ) ) {
+ $dumper->dump( MW_EXPORT_FULL, $textMode );
+} elseif( isset( $options['current'] ) ) {
+ $dumper->dump( MW_EXPORT_CURRENT, $textMode );
+} else {
+ $dumper->progress( <<<END
+This script dumps the wiki page database into an XML interchange wrapper
+format for export or backup.
+
+XML output is sent to stdout; progress reports are sent to stderr.
+
+Usage: php dumpBackup.php <action> [<options>]
+Actions:
+ --full Dump complete history of every page.
+ --current Includes only the latest revision of each page.
+
+Options:
+ --quiet Don't dump status reports to stderr.
+ --report=n Report position and speed after every n pages processed.
+ (Default: 100)
+ --server=h Force reading from MySQL server h
+ --start=n Start from page_id n
+ --end=n Stop before page_id n (exclusive)
+ --skip-header Don't output the <mediawiki> header
+ --skip-footer Don't output the </mediawiki> footer
+ --stub Don't perform old_text lookups; for 2-pass dump
+
+Fancy stuff:
+ --plugin=<class>[:<file>] Load a dump plugin class
+ --output=<type>:<file> Begin a filtered output stream;
+ <type>s: file, gzip, bzip2, 7zip
+ --filter=<type>[:<options>] Add a filter on an output branch
+
+END
+);
+}
+
+?>
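A plausible two-pass invocation, pairing this script with dumpTextPass.php
further down; this assumes dumpTextPass.php accepts the same <type>:<file>
syntax for --stub and --output, and the file names are illustrative:

    php dumpBackup.php --full --stub --output=gzip:stub.xml.gz
    php dumpTextPass.php --full --stub=gzip:stub.xml.gz --output=bzip2:pages-full.xml.bz2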
diff --git a/maintenance/dumpHTML.inc b/maintenance/dumpHTML.inc
new file mode 100644
index 00000000..2ed1e4a2
--- /dev/null
+++ b/maintenance/dumpHTML.inc
@@ -0,0 +1,650 @@
+<?php
+/**
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+define( 'REPORTING_INTERVAL', 10 );
+
+require_once( 'includes/ImagePage.php' );
+require_once( 'includes/CategoryPage.php' );
+require_once( 'includes/RawPage.php' );
+
+class DumpHTML {
+ # Destination directory
+ var $dest;
+
+ # Show interlanguage links?
+ var $interwiki = true;
+
+ # Depth of HTML directory tree
+ var $depth = 3;
+
+ # Directory that commons images are copied into
+ var $sharedStaticPath;
+
+ # Relative path to image directory
+ var $imageRel = 'upload';
+
+ # Copy commons images instead of symlinking
+ var $forceCopy = false;
+
+ # Make links assuming the script path is in the same directory as
+ # the destination
+ var $alternateScriptPath = false;
+
+ # Original values of various globals
+ var $oldArticlePath = false, $oldCopyrightIcon = false;
+
+ # Has setupGlobals been called?
+ var $setupDone = false;
+
+ # List of raw pages used in the current article
+ var $rawPages;
+
+ # Skin to use
+ var $skin = 'dumphtml';
+
+ function DumpHTML( $settings ) {
+ foreach ( $settings as $var => $value ) {
+ $this->$var = $value;
+ }
+ }
+
+ /**
+ * Write a set of articles specified by start and end page_id
+ * Skip categories and images, they will be done separately
+ */
+ function doArticles( $start, $end = false ) {
+ $fname = 'DumpHTML::doArticles';
+
+ $this->setupGlobals();
+
+ if ( $end === false ) {
+ $dbr =& wfGetDB( DB_SLAVE );
+ $end = $dbr->selectField( 'page', 'max(page_id)', false, $fname );
+ }
+
+ $mainPageObj = Title::newMainPage();
+ $mainPage = $mainPageObj->getPrefixedDBkey();
+
+
+ for ($id = $start; $id <= $end; $id++) {
+ wfWaitForSlaves( 20 );
+ if ( !($id % REPORTING_INTERVAL) ) {
+ print "Processing ID: $id\r";
+ }
+ if ( !($id % (REPORTING_INTERVAL*10) ) ) {
+ print "\n";
+ }
+ $title = Title::newFromID( $id );
+ if ( $title ) {
+ $ns = $title->getNamespace() ;
+ if ( $ns != NS_CATEGORY && $title->getPrefixedDBkey() != $mainPage ) {
+ $this->doArticle( $title );
+ }
+ }
+ }
+ print "\n";
+ }
+
+ function doSpecials() {
+ $this->doMainPage();
+
+ $this->setupGlobals();
+ print "Special:Categories...";
+ $this->doArticle( Title::makeTitle( NS_SPECIAL, 'Categories' ) );
+ print "\n";
+ }
+
+ /** Write the main page as index.html */
+ function doMainPage() {
+
+ print "Making index.html ";
+
+ // Set up globals with no ../../.. in the link URLs
+ $this->setupGlobals( 0 );
+
+ $title = Title::newMainPage();
+ $text = $this->getArticleHTML( $title );
+ $file = fopen( "{$this->dest}/index.html", "w" );
+ if ( !$file ) {
+ print "\nCan't open index.html for writing\n";
+ return false;
+ }
+ fwrite( $file, $text );
+ fclose( $file );
+ print "\n";
+ }
+
+ function doImageDescriptions() {
+ global $wgSharedUploadDirectory;
+
+ $fname = 'DumpHTML::doImageDescriptions';
+
+ $this->setupGlobals();
+
+ /**
+ * Dump image description pages that don't have an associated article, but do
+ * have a local image
+ */
+ $dbr =& wfGetDB( DB_SLAVE );
+ extract( $dbr->tableNames( 'image', 'page' ) );
+ $res = $dbr->select( 'image', array( 'img_name' ), false, $fname );
+
+ $i = 0;
+ print "Writing image description pages for local images\n";
+ $num = $dbr->numRows( $res );
+ while ( $row = $dbr->fetchObject( $res ) ) {
+ wfWaitForSlaves( 10 );
+ if ( !( ++$i % REPORTING_INTERVAL ) ) {
+ print "Done $i of $num\r";
+ }
+ $title = Title::makeTitle( NS_IMAGE, $row->img_name );
+ if ( $title->getArticleID() ) {
+ // Already done by dumpHTML
+ continue;
+ }
+ $this->doArticle( $title );
+ }
+ print "\n";
+
+ /**
+ * Dump images which only have a real description page on commons
+ */
+ print "Writing description pages for commons images\n";
+ $i = 0;
+ for ( $hash = 0; $hash < 256; $hash++ ) {
+ $dir = sprintf( "%01x/%02x", intval( $hash / 16 ), $hash );
+ $paths = array_merge( glob( "{$this->sharedStaticPath}/$dir/*" ),
+ glob( "{$this->sharedStaticPath}/thumb/$dir/*" ) );
+
+ foreach ( $paths as $path ) {
+ $file = basename( $path );
+ if ( !(++$i % REPORTING_INTERVAL ) ) {
+ print "$i\r";
+ }
+
+ $title = Title::makeTitle( NS_IMAGE, $file );
+ $this->doArticle( $title );
+ }
+ }
+ print "\n";
+ }
+
+ function doCategories() {
+ $fname = 'DumpHTML::doCategories';
+ $this->setupGlobals();
+
+ $dbr =& wfGetDB( DB_SLAVE );
+ print "Selecting categories...";
+ $sql = 'SELECT DISTINCT cl_to FROM ' . $dbr->tableName( 'categorylinks' );
+ $res = $dbr->query( $sql, $fname );
+
+ print "\nWriting " . $dbr->numRows( $res ). " category pages\n";
+ $i = 0;
+ while ( $row = $dbr->fetchObject( $res ) ) {
+ wfWaitForSlaves( 10 );
+ if ( !(++$i % REPORTING_INTERVAL ) ) {
+ print "$i\r";
+ }
+ $title = Title::makeTitle( NS_CATEGORY, $row->cl_to );
+ $this->doArticle( $title );
+ }
+ print "\n";
+ }
+
+ function doRedirects() {
+ print "Doing redirects...\n";
+ $fname = 'DumpHTML::doRedirects';
+ $this->setupGlobals();
+ $dbr =& wfGetDB( DB_SLAVE );
+
+ $res = $dbr->select( 'page', array( 'page_namespace', 'page_title' ),
+ array( 'page_is_redirect' => 1 ), $fname );
+ $num = $dbr->numRows( $res );
+ print "$num redirects to do...\n";
+ $i = 0;
+ while ( $row = $dbr->fetchObject( $res ) ) {
+ $title = Title::makeTitle( $row->page_namespace, $row->page_title );
+ if ( !(++$i % (REPORTING_INTERVAL*10) ) ) {
+ print "Done $i of $num\n";
+ }
+ $this->doArticle( $title );
+ }
+ }
+
+ /** Write an article specified by title */
+ function doArticle( $title ) {
+ global $wgTitle, $wgSharedUploadPath, $wgSharedUploadDirectory;
+ global $wgUploadDirectory;
+
+ $this->rawPages = array();
+ $text = $this->getArticleHTML( $title );
+
+ if ( $text === false ) {
+ return;
+ }
+
+ # Parse the XHTML to find the images
+ $images = $this->findImages( $text );
+ $this->copyImages( $images );
+
+ # Write to file
+ $this->writeArticle( $title, $text );
+
+ # Do raw pages
+ wfMkdirParents( "{$this->dest}/raw", 0755 );
+ foreach( $this->rawPages as $record ) {
+ list( $file, $title, $params ) = $record;
+
+ $path = "{$this->dest}/raw/$file";
+ if ( !file_exists( $path ) ) {
+ $article = new Article( $title );
+ $request = new FauxRequest( $params );
+ $rp = new RawPage( $article, $request );
+ $text = $rp->getRawText();
+
+ print "Writing $file\n";
+ $file = fopen( $path, 'w' );
+ if ( !$file ) {
+				print("Can't open file $path for writing\n");
+ continue;
+ }
+ fwrite( $file, $text );
+ fclose( $file );
+ }
+ }
+ }
+
+ /** Write the given text to the file identified by the given title object */
+ function writeArticle( &$title, $text ) {
+ $filename = $this->getHashedFilename( $title );
+ $fullName = "{$this->dest}/$filename";
+ $fullDir = dirname( $fullName );
+
+ wfMkdirParents( $fullDir, 0755 );
+
+ $file = fopen( $fullName, 'w' );
+ if ( !$file ) {
+ print("Can't open file $fullName for writing\n");
+ return;
+ }
+
+ fwrite( $file, $text );
+ fclose( $file );
+ }
+
+ /** Set up globals required for parsing */
+ function setupGlobals( $currentDepth = NULL ) {
+ global $wgUser, $wgTitle, $wgStylePath, $wgArticlePath, $wgMathPath;
+ global $wgUploadPath, $wgLogo, $wgMaxCredits, $wgSharedUploadPath;
+ global $wgHideInterlanguageLinks, $wgUploadDirectory, $wgThumbnailScriptPath;
+ global $wgSharedThumbnailScriptPath, $wgEnableParserCache, $wgHooks, $wgServer;
+ global $wgRightsUrl, $wgRightsText, $wgCopyrightIcon;
+
+ static $oldLogo = NULL;
+
+ if ( !$this->setupDone ) {
+ $wgHooks['GetLocalURL'][] =& $this;
+ $wgHooks['GetFullURL'][] =& $this;
+ $this->oldArticlePath = $wgServer . $wgArticlePath;
+ }
+
+ if ( is_null( $currentDepth ) ) {
+ $currentDepth = $this->depth;
+ }
+
+ if ( $this->alternateScriptPath ) {
+ if ( $currentDepth == 0 ) {
+ $wgScriptPath = '.';
+ } else {
+ $wgScriptPath = '..' . str_repeat( '/..', $currentDepth - 1 );
+ }
+ } else {
+ $wgScriptPath = '..' . str_repeat( '/..', $currentDepth );
+ }
+
+ $wgArticlePath = str_repeat( '../', $currentDepth ) . '$1';
+
+ # Logo image
+ # Allow for repeated setup
+ if ( !is_null( $oldLogo ) ) {
+ $wgLogo = $oldLogo;
+ } else {
+ $oldLogo = $wgLogo;
+ }
+
+ if ( strpos( $wgLogo, $wgUploadPath ) === 0 ) {
+ # If it's in the upload directory, rewrite it to the new upload directory
+ $wgLogo = "$wgScriptPath/{$this->imageRel}/" . substr( $wgLogo, strlen( $wgUploadPath ) + 1 );
+ } elseif ( $wgLogo{0} == '/' ) {
+ # This is basically heuristic
+			# Rewrite an absolute logo path to one relative to the script path
+ $wgLogo = $wgScriptPath . $wgLogo;
+ }
+
+ # Another ugly hack
+ if ( !$this->setupDone ) {
+ $this->oldCopyrightIcon = $wgCopyrightIcon;
+ }
+ $wgCopyrightIcon = str_replace( 'src="/images',
+ 'src="' . htmlspecialchars( $wgScriptPath ) . '/images', $this->oldCopyrightIcon );
+
+ $wgStylePath = "$wgScriptPath/skins";
+ $wgUploadPath = "$wgScriptPath/{$this->imageRel}";
+ $wgSharedUploadPath = "$wgUploadPath/shared";
+ $wgMaxCredits = -1;
+ $wgHideInterlanguageLinks = !$this->interwiki;
+ $wgThumbnailScriptPath = $wgSharedThumbnailScriptPath = false;
+ $wgEnableParserCache = false;
+ $wgMathPath = "$wgScriptPath/math";
+
+ if ( !empty( $wgRightsText ) ) {
+ $wgRightsUrl = "$wgScriptPath/COPYING.html";
+ }
+
+ $wgUser = new User;
+ $wgUser->setOption( 'skin', $this->skin );
+ $wgUser->setOption( 'editsection', 0 );
+
+ $this->sharedStaticPath = "$wgUploadDirectory/shared";
+
+ $this->setupDone = true;
+ }
+
+ /** Reads the content of a title object, executes the skin and captures the result */
+ function getArticleHTML( &$title ) {
+ global $wgOut, $wgTitle, $wgArticle, $wgUser;
+
+ $linkCache =& LinkCache::singleton();
+ $linkCache->clear();
+ $wgTitle = $title;
+ if ( is_null( $wgTitle ) ) {
+ return false;
+ }
+
+ $ns = $wgTitle->getNamespace();
+ if ( $ns == NS_SPECIAL ) {
+ $wgOut = new OutputPage;
+ $wgOut->setParserOptions( new ParserOptions );
+ SpecialPage::executePath( $wgTitle );
+ } else {
+ /** @todo merge with Wiki.php code */
+ if ( $ns == NS_IMAGE ) {
+ $wgArticle = new ImagePage( $wgTitle );
+ } elseif ( $ns == NS_CATEGORY ) {
+ $wgArticle = new CategoryPage( $wgTitle );
+ } else {
+ $wgArticle = new Article( $wgTitle );
+ }
+ $rt = Title::newFromRedirect( $wgArticle->fetchContent() );
+ if ( $rt != NULL ) {
+ return $this->getRedirect( $rt );
+ } else {
+ $wgOut = new OutputPage;
+ $wgOut->setParserOptions( new ParserOptions );
+
+ $wgArticle->view();
+ }
+ }
+
+ $sk =& $wgUser->getSkin();
+ ob_start();
+ $sk->outputPage( $wgOut );
+ $text = ob_get_contents();
+ ob_end_clean();
+
+ return $text;
+ }
+
+ function getRedirect( $rt ) {
+ $url = $rt->escapeLocalURL();
+ $text = $rt->getPrefixedText();
+ return <<<ENDTEXT
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+ <meta http-equiv="Content-Type" content="text/html; charset=UTF-8" />
+ <meta http-equiv="Refresh" content="0;url=$url" />
+</head>
+<body>
+ <p>Redirecting to <a href="$url">$text</a></p>
+</body>
+</html>
+ENDTEXT;
+ }
+
+ /** Returns image paths used in an XHTML document */
+ function findImages( $text ) {
+ global $wgOutputEncoding, $wgDumpImages;
+ $parser = xml_parser_create( $wgOutputEncoding );
+ xml_set_element_handler( $parser, 'wfDumpStartTagHandler', 'wfDumpEndTagHandler' );
+
+ $wgDumpImages = array();
+ xml_parse( $parser, $text );
+ xml_parser_free( $parser );
+
+ return $wgDumpImages;
+ }
+
+ /**
+ * Copy images (or create symlinks) from commons to a static directory.
+ * This is necessary even if you intend to distribute all of commons, because
+	 * the directory contents are used to work out which image description pages
+ * are needed.
+ *
+ * Also copies math images
+ *
+ */
+ function copyImages( $images ) {
+ global $wgSharedUploadPath, $wgSharedUploadDirectory, $wgMathPath, $wgMathDirectory;
+ # Find shared uploads and copy them into the static directory
+ $sharedPathLength = strlen( $wgSharedUploadPath );
+ $mathPathLength = strlen( $wgMathPath );
+ foreach ( $images as $escapedImage => $dummy ) {
+ $image = urldecode( $escapedImage );
+
+ # Is it shared?
+ if ( substr( $image, 0, $sharedPathLength ) == $wgSharedUploadPath ) {
+ # Reconstruct full filename
+ $rel = substr( $image, $sharedPathLength + 1 ); // +1 for slash
+ $sourceLoc = "$wgSharedUploadDirectory/$rel";
+ $staticLoc = "{$this->sharedStaticPath}/$rel";
+ #print "Copying $sourceLoc to $staticLoc\n";
+ # Copy to static directory
+ if ( !file_exists( $staticLoc ) ) {
+ wfMkdirParents( dirname( $staticLoc ), 0755 );
+ if ( function_exists( 'symlink' ) && !$this->forceCopy ) {
+ symlink( $sourceLoc, $staticLoc );
+ } else {
+ copy( $sourceLoc, $staticLoc );
+ }
+ }
+
+ if ( substr( $rel, 0, 6 ) == 'thumb/' ) {
+ # That was a thumbnail
+ # We will also copy the real image
+ $parts = explode( '/', $rel );
+ $rel = "{$parts[1]}/{$parts[2]}/{$parts[3]}";
+ $sourceLoc = "$wgSharedUploadDirectory/$rel";
+ $staticLoc = "{$this->sharedStaticPath}/$rel";
+ #print "Copying $sourceLoc to $staticLoc\n";
+ if ( !file_exists( $staticLoc ) ) {
+ wfMkdirParents( dirname( $staticLoc ), 0755 );
+ if ( function_exists( 'symlink' ) && !$this->forceCopy ) {
+ symlink( $sourceLoc, $staticLoc );
+ } else {
+ copy( $sourceLoc, $staticLoc );
+ }
+ }
+ }
+ } else
+ # Is it math?
+ if ( substr( $image, 0, $mathPathLength ) == $wgMathPath ) {
+ $rel = substr( $image, $mathPathLength + 1 ); // +1 for slash
+ $source = "$wgMathDirectory/$rel";
+ $dest = "{$this->dest}/math/$rel";
+ @mkdir( "{$this->dest}/math", 0755 );
+ if ( !file_exists( $dest ) ) {
+ copy( $source, $dest );
+ }
+ }
+ }
+ }
+
+ function onGetFullURL( &$title, &$url, $query ) {
+ global $wgContLang, $wgArticlePath;
+
+ $iw = $title->getInterwiki();
+ if ( $title->isExternal() && $wgContLang->getLanguageName( $iw ) ) {
+ if ( $title->getDBkey() == '' ) {
+ $url = str_replace( '$1', "../$iw/index.html", $wgArticlePath );
+ } else {
+ $url = str_replace( '$1', "../$iw/" . wfUrlencode( $this->getHashedFilename( $title ) ),
+ $wgArticlePath );
+ }
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ function onGetLocalURL( &$title, &$url, $query ) {
+ global $wgArticlePath;
+
+ if ( $title->isExternal() ) {
+ # Default is fine for interwiki
+ return true;
+ }
+
+ $url = false;
+ if ( $query != '' ) {
+ parse_str( $query, $params );
+ if ( isset($params['action']) && $params['action'] == 'raw' ) {
+ if ( $params['gen'] == 'css' || $params['gen'] == 'js' ) {
+ $file = 'gen.' . $params['gen'];
+ } else {
+ $file = $this->getFriendlyName( $title->getPrefixedDBkey() );
+ // Clean up Monobook.css etc.
+ if ( preg_match( '/^(.*)\.(css|js)_[0-9a-f]{4}$/', $file, $matches ) ) {
+ $file = $matches[1] . '.' . $matches[2];
+ }
+ }
+ $this->rawPages[$file] = array( $file, $title, $params );
+ $url = str_replace( '$1', "raw/" . wfUrlencode( $file ), $wgArticlePath );
+ }
+ }
+ if ( $url === false ) {
+ $url = str_replace( '$1', wfUrlencode( $this->getHashedFilename( $title ) ), $wgArticlePath );
+ }
+
+ return false;
+ }
+
+ function getHashedFilename( &$title ) {
+ if ( '' != $title->mInterwiki ) {
+ $dbkey = $title->getDBkey();
+ } else {
+ $dbkey = $title->getPrefixedDBkey();
+ }
+
+ $mainPage = Title::newMainPage();
+ if ( $mainPage->getPrefixedDBkey() == $dbkey ) {
+ return 'index.html';
+ }
+
+ return $this->getHashedDirectory( $title ) . '/' .
+ $this->getFriendlyName( $dbkey ) . '.html';
+ }
+
+ function getFriendlyName( $name ) {
+ global $wgLang;
+ # Replace illegal characters for Windows paths with underscores
+ $friendlyName = strtr( $name, '/\\*?"<>|~', '_________' );
+
+ # Work out lower case form. We assume we're on a system with case-insensitive
+ # filenames, so unless the case is of a special form, we have to disambiguate
+ if ( function_exists( 'mb_strtolower' ) ) {
+ $lowerCase = $wgLang->ucfirst( mb_strtolower( $name ) );
+ } else {
+ $lowerCase = ucfirst( strtolower( $name ) );
+ }
+
+ # Make it mostly unique
+ if ( $lowerCase != $friendlyName ) {
+ $friendlyName .= '_' . substr(md5( $name ), 0, 4);
+ }
+ # Handle colon specially by replacing it with tilde
+ # Thus we reduce the number of paths with hashes appended
+ $friendlyName = str_replace( ':', '~', $friendlyName );
+
+ return $friendlyName;
+ }
+
+ /**
+ * Get a relative directory for putting a title into
+ */
+ function getHashedDirectory( &$title ) {
+ if ( '' != $title->getInterwiki() ) {
+ $pdbk = $title->getDBkey();
+ } else {
+ $pdbk = $title->getPrefixedDBkey();
+ }
+
+ # Find the first colon if there is one, use characters after it
+ $p = strpos( $pdbk, ':' );
+ if ( $p !== false ) {
+ $dbk = substr( $pdbk, $p + 1 );
+ $dbk = substr( $dbk, strspn( $dbk, '_' ) );
+ } else {
+ $dbk = $pdbk;
+ }
+
+ # Split into characters
+ preg_match_all( '/./us', $dbk, $m );
+
+ $chars = $m[0];
+ $length = count( $chars );
+ $dir = '';
+
+ for ( $i = 0; $i < $this->depth; $i++ ) {
+ if ( $i ) {
+ $dir .= '/';
+ }
+ if ( $i >= $length ) {
+ $dir .= '_';
+ } else {
+ $c = $chars[$i];
+ if ( ord( $c ) >= 128 || preg_match( '/[a-zA-Z0-9!#$%&()+,[\]^_`{}-]/', $c ) ) {
+ if ( function_exists( 'mb_strtolower' ) ) {
+ $dir .= mb_strtolower( $c );
+ } else {
+ $dir .= strtolower( $c );
+ }
+ } else {
+ $dir .= sprintf( "%02X", ord( $c ) );
+ }
+ }
+ }
+ return $dir;
+ }
+
+}
+
+/** XML parser callback */
+function wfDumpStartTagHandler( $parser, $name, $attribs ) {
+ global $wgDumpImages;
+
+ if ( $name == 'IMG' && isset( $attribs['SRC'] ) ) {
+ $wgDumpImages[$attribs['SRC']] = true;
+ }
+}
+
+/** XML parser callback */
+function wfDumpEndTagHandler( $parser, $name ) {}
+
+# vim: syn=php
+?>
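getHashedDirectory() and getFriendlyName() spread articles over a fixed-depth
directory tree; hand-traced examples for the default depth of 3 (treat as
illustrative):

    Foobar  ->  f/o/o/Foobar.html
    FooBar  ->  f/o/o/FooBar_<4 hex chars>.html   (disambiguated, since the
                title is not in "first letter upper, rest lower" form)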
diff --git a/maintenance/dumpHTML.php b/maintenance/dumpHTML.php
new file mode 100644
index 00000000..37a46465
--- /dev/null
+++ b/maintenance/dumpHTML.php
@@ -0,0 +1,131 @@
+<?php
+/**
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/**
+ * Usage:
+ * php dumpHTML.php [options...]
+ *
+ * -d <dest> destination directory
+ * -s <start> start ID
+ * -e <end> end ID
+ * -k <skin> skin to use (defaults to dumphtml)
+ * --images only do image description pages
+ * --categories only do category pages
+ * --redirects only do redirects
+ * --special only do miscellaneous stuff
+ * --force-copy copy commons instead of symlink, needed for Wikimedia
+ * --interlang allow interlanguage links
+ */
+
+
+$optionsWithArgs = array( 's', 'd', 'e', 'k' );
+
+$profiling = false;
+
+if ( $profiling ) {
+ define( 'MW_CMDLINE_CALLBACK', 'wfSetupDump' );
+ function wfSetupDump() {
+ global $wgProfiling, $wgProfileToDatabase, $wgProfileSampleRate;
+ $wgProfiling = true;
+ $wgProfileToDatabase = false;
+ $wgProfileSampleRate = 1;
+ }
+}
+
+require_once( "commandLine.inc" );
+require_once( "dumpHTML.inc" );
+
+error_reporting( E_ALL & (~E_NOTICE) );
+define( 'CHUNK_SIZE', 50 );
+
+if ( !empty( $options['s'] ) ) {
+ $start = $options['s'];
+} else {
+ $start = 1;
+}
+
+if ( !empty( $options['e'] ) ) {
+ $end = $options['e'];
+} else {
+ $dbr =& wfGetDB( DB_SLAVE );
+ $end = $dbr->selectField( 'page', 'max(page_id)', false );
+}
+
+if ( !empty( $options['d'] ) ) {
+ $dest = $options['d'];
+} else {
+ $dest = 'static';
+}
+
+$skin = isset( $options['k'] ) ? $options['k'] : 'dumphtml';
+
+$wgHTMLDump = new DumpHTML( array(
+ 'dest' => $dest,
+ 'forceCopy' => $options['force-copy'],
+ 'alternateScriptPath' => $options['interlang'],
+ 'interwiki' => $options['interlang'],
+ 'skin' => $skin,
+));
+
+
+if ( $options['special'] ) {
+ $wgHTMLDump->doSpecials();
+} elseif ( $options['images'] ) {
+ $wgHTMLDump->doImageDescriptions();
+} elseif ( $options['categories'] ) {
+ $wgHTMLDump->doCategories();
+} elseif ( $options['redirects'] ) {
+ $wgHTMLDump->doRedirects();
+} else {
+ print("Creating static HTML dump in directory $dest. \n".
+ "Starting from page_id $start of $end.\n");
+
+ $dbr =& wfGetDB( DB_SLAVE );
+ $server = $dbr->getProperty( 'mServer' );
+ print "Using database {$server}\n";
+
+ $wgHTMLDump->doArticles( $start, $end );
+ if ( !isset( $options['e'] ) ) {
+ $wgHTMLDump->doImageDescriptions();
+ $wgHTMLDump->doCategories();
+ $wgHTMLDump->doSpecials();
+ }
+
+ /*
+ if ( $end - $start > CHUNK_SIZE * 2 ) {
+ // Split the problem into smaller chunks, run them in different PHP instances
+ // This is a memory/resource leak workaround
+ print("Creating static HTML dump in directory $dest. \n".
+ "Starting from page_id $start of $end.\n");
+
+ chdir( "maintenance" );
+ for ( $chunkStart = $start; $chunkStart < $end; $chunkStart += CHUNK_SIZE ) {
+ $chunkEnd = $chunkStart + CHUNK_SIZE - 1;
+ if ( $chunkEnd > $end ) {
+ $chunkEnd = $end;
+ }
+ passthru( "php dumpHTML.php -d " . wfEscapeShellArg( $dest ) . " -s $chunkStart -e $chunkEnd" );
+ }
+ chdir( ".." );
+ $d->doImageDescriptions();
+ $d->doCategories();
+ $d->doMainPage( $dest );
+ } else {
+ $d->doArticles( $start, $end );
+ }
+ */
+}
+
+if ( isset( $options['debug'] ) ) {
+ print_r($GLOBALS);
+}
+
+if ( $profiling ) {
+ echo $wgProfiler->getOutput();
+}
+
+?>
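A typical invocation per the usage block above (destination path and id range
are illustrative):

    php dumpHTML.php -d /var/static -s 1 -e 5000 -k dumphtml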
diff --git a/maintenance/dumpInterwiki.inc b/maintenance/dumpInterwiki.inc
new file mode 100644
index 00000000..3cca1e02
--- /dev/null
+++ b/maintenance/dumpInterwiki.inc
@@ -0,0 +1,219 @@
+<?php
+/**
+ * Rebuild interwiki table using the file on meta and the language list
+ * Wikimedia specific!
+ *
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+
+/**
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+class Site {
+ var $suffix, $lateral, $url;
+
+ function Site( $s, $l, $u ) {
+ $this->suffix = $s;
+ $this->lateral = $l;
+ $this->url = $u;
+ }
+
+ function getURL( $lang ) {
+ $xlang = str_replace( '_', '-', $lang );
+ return "http://$xlang.{$this->url}/wiki/\$1";
+ }
+}
+
+function getRebuildInterwikiDump() {
+ global $langlist, $languageAliases, $prefixRewrites, $wgDBname;
+
+ # Multi-language sites
+ # db suffix => db suffix, iw prefix, hostname
+ $sites = array(
+ 'wiki' => new Site( 'wiki', 'w', 'wikipedia.org' ),
+ 'wiktionary' => new Site( 'wiktionary', 'wikt', 'wiktionary.org' ),
+ 'wikiquote' => new Site( 'wikiquote', 'q', 'wikiquote.org' ),
+ 'wikibooks' => new Site( 'wikibooks', 'b', 'wikibooks.org' ),
+ 'wikinews' => new Site( 'wikinews', 'n', 'wikinews.org' ),
+ 'wikisource' => new Site( 'wikisource', 's', 'wikisource.org' ),
+ 'wikimedia' => new Site( 'wikimedia', 'chapter', 'wikimedia.org' ),
+ );
+
+ # List of language prefixes likely to be found in multi-language sites
+ $langlist = array_map( "trim", file( "/home/wikipedia/common/langlist" ) );
+
+ # List of all database names
+ $dblist = array_map( "trim", file( "/home/wikipedia/common/all.dblist" ) );
+
+ # Special-case hostnames
+ $specials = array(
+ 'sourceswiki' => 'sources.wikipedia.org',
+ 'quotewiki' => 'wikiquote.org',
+ 'textbookwiki' => 'wikibooks.org',
+ 'sep11wiki' => 'sep11.wikipedia.org',
+ 'metawiki' => 'meta.wikimedia.org',
+ 'commonswiki' => 'commons.wikimedia.org',
+ );
+
+ # Extra interwiki links that can't be in the intermap for some reason
+ $extraLinks = array(
+ array( 'm', 'http://meta.wikimedia.org/wiki/$1', 1 ),
+ array( 'meta', 'http://meta.wikimedia.org/wiki/$1', 1 ),
+ array( 'sep11', 'http://sep11.wikipedia.org/wiki/$1', 1 ),
+ );
+
+ # Language aliases, usually configured as redirects to the real wiki in apache
+ # Interlanguage links are made directly to the real wiki
+ # Something horrible happens if you forget to list an alias here, I can't
+ # remember what
+ $languageAliases = array(
+ 'zh-cn' => 'zh',
+ 'zh-tw' => 'zh',
+ 'dk' => 'da',
+ 'nb' => 'no',
+ );
+
+ # Special case prefix rewrites, for the benefit of Swedish which uses s:t
+ # as an abbreviation for saint
+ $prefixRewrites = array(
+ 'svwiki' => array ( 's' => 'src'),
+ );
+
+ # Construct a list of reserved prefixes
+ $reserved = array();
+ foreach ( $langlist as $lang ) {
+ $reserved[$lang] = 1;
+ }
+ foreach ( $languageAliases as $alias => $lang ) {
+ $reserved[$alias] = 1;
+ }
+ foreach( $sites as $site ) {
+ $reserved[$site->lateral] = 1;
+ }
+
+ # Extract the intermap from meta
+ $intermap = wfGetHTTP( 'http://meta.wikimedia.org/w/index.php?title=Interwiki_map&action=raw', 30 );
+ $lines = array_map( 'trim', explode( "\n", trim( $intermap ) ) );
+
+ if ( !$lines || count( $lines ) < 2 ) {
+ wfDie( "m:Interwiki_map not found" );
+ }
+
+ $iwArray = array();
+	# Global interwiki map
+ foreach ( $lines as $line ) {
+ if ( preg_match( '/^\|\s*(.*?)\s*\|\|\s*(.*?)\s*$/', $line, $matches ) ) {
+ $prefix = strtolower( $matches[1] );
+ $url = $matches[2];
+ if ( preg_match( '/(wikipedia|wiktionary|wikisource|wikiquote|wikibooks|wikimedia)\.org/', $url ) ) {
+ $local = 1;
+ } else {
+ $local = 0;
+ }
+
+ if ( empty( $reserved[$prefix] ) ) {
+ $imap = array( "iw_prefix" => $prefix, "iw_url" => $url, "iw_local" => $local );
+ makeLink ($imap, "__global");
+ }
+ }
+ }
+
+ # Exclude Wikipedia for Wikipedia
+	makeLink ( array ('iw_prefix' => 'wikipedia', 'iw_url' => null ), "_wiki" );
+
+	# Multi-language sites
+	foreach ($sites as $site)
+		makeLanguageLinks ( $site, "_".$site->suffix );
+
+
+ foreach ( $dblist as $db ) {
+ if ( isset( $specials[$db] ) ) {
+ # Special wiki
+ # Has interwiki links and interlanguage links to wikipedia
+
+ makeLink( array( 'iw_prefix' => $db, 'iw_url' => "wiki"), "__sites" );
+ # Links to multilanguage sites
+ foreach ( $sites as $targetSite ) {
+ makeLink( array( 'iw_prefix' => $targetSite->lateral,
+ 'iw_url' =>$targetSite->getURL( 'en' ),
+ 'iw_local' => 1 ), $db );
+ }
+
+ } else {
+ # Find out which site this DB belongs to
+ $site = false;
+ foreach( $sites as $candidateSite ) {
+ $suffix = $candidateSite->suffix;
+ if ( preg_match( "/(.*)$suffix$/", $db, $matches ) ) {
+ $site = $candidateSite;
+ break;
+ }
+ }
+			if ( !$site ) {
+				print "Invalid database $db\n";
+				continue;
+			}
+			makeLink( array( 'iw_prefix' => $db, 'iw_url' => $site->suffix), "__sites" );
+			$lang = $matches[1];
+ $host = "$lang." . $site->url;
+
+ # Lateral links
+ foreach ( $sites as $targetSite ) {
+ if ( $targetSite->suffix != $site->suffix ) {
+ makeLink( array( 'iw_prefix' => $targetSite->lateral,
+ 'iw_url' => $targetSite->getURL( $lang ),
+ 'iw_local' => 1 ), $db );
+ }
+ }
+
+ if ( $site->suffix == "wiki" ) {
+ makeLink( array('iw_prefix' => 'w',
+ 'iw_url' => "http://en.wikipedia.org/wiki/$1",
+ 'iw_local' => 1), $db );
+ }
+
+ }
+ }
+ foreach ( $extraLinks as $link )
+ makeLink( $link, "__global" );
+}
+
+# ------------------------------------------------------------------------------------------
+
+# Emits (via makeLink) entries for all interlanguage links to a particular site
+function makeLanguageLinks( &$site, $source ) {
+ global $langlist, $languageAliases;
+ # Actual languages with their own databases
+ foreach ( $langlist as $targetLang ) {
+ makeLink( array( $targetLang, $site->getURL( $targetLang ), 1 ), $source );
+ }
+
+ # Language aliases
+ foreach ( $languageAliases as $alias => $lang ) {
+ makeLink( array( $alias, $site->getURL( $lang ), 1 ), $source );
+ }
+}
+
+function makeLink( $entry, $source ) {
+ global $prefixRewrites, $dbFile;
+ if ( isset( $prefixRewrites[$source] ) && isset( $prefixRewrites[$source][$entry[0]] ) )
+ $entry[0] = $prefixRewrites[$source][$entry[0]];
+ if (!array_key_exists("iw_prefix",$entry))
+ $entry = array("iw_prefix" => $entry[0], "iw_url" => $entry[1], "iw_local" => $entry[2]);
+ if ( array_key_exists($source,$prefixRewrites) &&
+ array_key_exists($entry['iw_prefix'],$prefixRewrites[$source]))
+ $entry['iw_prefix'] = $prefixRewrites[$source][$entry['iw_prefix']];
+ if ($dbFile)
+ dba_insert("{$source}:{$entry['iw_prefix']}", trim("{$entry['iw_local']} {$entry['iw_url']}"),$dbFile);
+ else
+ print "{$source}:{$entry['iw_prefix']} {$entry['iw_url']} {$entry['iw_local']}\n";
+
+ }
+
+?>
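With -o, each link becomes a CDB record keyed "source:prefix" whose value is
"local url" (see makeLink() above); a sketch of reading one back, with an
illustrative file name and key:

    <?php
    // Read one interwiki record out of the CDB written by this script.
    $db = dba_open( 'interwiki.cdb', 'r', 'cdb' );
    $value = dba_fetch( '__global:wiktionary', $db ); // e.g. "1 http://..."
    list( $iwLocal, $iwUrl ) = explode( ' ', $value, 2 );
    dba_close( $db );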
diff --git a/maintenance/dumpInterwiki.php b/maintenance/dumpInterwiki.php
new file mode 100644
index 00000000..411260ac
--- /dev/null
+++ b/maintenance/dumpInterwiki.php
@@ -0,0 +1,25 @@
+<?php
+/**
+ * Rebuild interwiki table using the file on meta and the language list
+ * Wikimedia specific!
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+$oldCwd = getcwd();
+
+$optionsWithArgs = array( "o" );
+include_once( "commandLine.inc" );
+include_once( "dumpInterwiki.inc" );
+chdir( $oldCwd );
+
+# Output
+if ( isset( $options['o'] ) ) {
+ # To database specified with -o
+ $dbFile = dba_open( $options['o'], "n", "cdb_make" );
+}
+
+getRebuildInterwikiDump();
+?>
diff --git a/maintenance/dumpLinks.php b/maintenance/dumpLinks.php
new file mode 100644
index 00000000..f040f390
--- /dev/null
+++ b/maintenance/dumpLinks.php
@@ -0,0 +1,63 @@
+<?php
+/**
+ * Copyright (C) 2005 Brion Vibber <brion@pobox.com>
+ * http://www.mediawiki.org/
+ *
+ * Quick demo hack to generate a plaintext link dump,
+ * per the proposed wiki link database standard:
+ * http://www.usemod.com/cgi-bin/mb.pl?LinkDatabase
+ *
+ * Includes all (live and broken) intra-wiki links.
+ * Does not include interwiki or URL links.
+ * Dumps ASCII text to stdout; command-line.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * @package MediaWiki
+ * @subpackage SpecialPage
+ */
+
+require_once 'commandLine.inc';
+
+$dbr =& wfGetDB( DB_SLAVE );
+$result = $dbr->select( array( 'pagelinks', 'page' ),
+ array(
+ 'page_id',
+ 'page_namespace',
+ 'page_title',
+ 'pl_namespace',
+ 'pl_title' ),
+ array( 'page_id=pl_from' ),
+ 'dumpLinks',
+ array( 'ORDER BY page_id' ) );
+
+$lastPage = null;
+while( $row = $dbr->fetchObject( $result ) ) {
+ if( $lastPage != $row->page_id ) {
+ if( isset( $lastPage ) ) {
+ print "\n";
+ }
+ $page = Title::makeTitle( $row->page_namespace, $row->page_title );
+ print $page->getPrefixedUrl();
+ $lastPage = $row->page_id;
+ }
+ $link = Title::makeTitle( $row->pl_namespace, $row->pl_title );
+ print " " . $link->getPrefixedUrl();
+}
+if( isset( $lastPage ) )
+ print "\n";
+
+?>
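Output per the usemod link-database format referenced above is one line per
page: the page title followed by its space-separated link targets.
Illustrative output:

    Main_Page Help:Contents Portal:Current_events
    Help:Contents Main_Page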
diff --git a/maintenance/dumpMessages.php b/maintenance/dumpMessages.php
new file mode 100644
index 00000000..84ecc4c6
--- /dev/null
+++ b/maintenance/dumpMessages.php
@@ -0,0 +1,19 @@
+<?php
+/**
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+require_once( "commandLine.inc" );
+$wgMessageCache->disableTransform();
+$messages = array();
+foreach ( $wgAllMessagesEn as $key => $englishValue )
+{
+ $messages[$key] = wfMsg( $key );
+}
+print "MediaWiki $wgVersion language file\n";
+print serialize( $messages );
+
+?>
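The dump is a one-line header followed by a serialized PHP array; a sketch of
reading it back (the file name and message key are illustrative):

    <?php
    $lines = file( 'messages.txt' );    // captured output of dumpMessages.php
    array_shift( $lines );              // drop the "MediaWiki ... language file" header
    $messages = unserialize( implode( '', $lines ) );
    echo $messages['aboutsite'];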
diff --git a/maintenance/dumpReplayLog.php b/maintenance/dumpReplayLog.php
new file mode 100644
index 00000000..aa1d5b9a
--- /dev/null
+++ b/maintenance/dumpReplayLog.php
@@ -0,0 +1,118 @@
+<?php
+/**
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+error_reporting(E_ALL);
+
+/** */
+require_once( "commandLine.inc" );
+require_once( 'includes/SpecialExport.php' );
+
+/** */
+function dumpReplayLog( $start ) {
+ $dbw =& wfGetDB( DB_MASTER );
+ $recentchanges = $dbw->tableName( 'recentchanges' );
+ $result =& $dbw->safeQuery( "SELECT * FROM $recentchanges WHERE rc_timestamp >= "
+ . $dbw->timestamp( $start ) . ' ORDER BY rc_timestamp');
+
+ global $wgInputEncoding;
+ echo '<' . '?xml version="1.0" encoding="' . $wgInputEncoding . '" ?' . ">\n";
+ echo "<wikilog version='experimental'>\n";
+ echo "<!-- Do not use this script for any purpose. It's scary. -->\n";
+ while( $row = $dbw->fetchObject( $result ) ) {
+ echo dumpReplayEntry( $row );
+ }
+ echo "</wikilog>\n";
+ $dbw->freeResult( $result );
+}
+
+/** */
+function dumpReplayEntry( $row ) {
+ $title = Title::MakeTitle( $row->rc_namespace, $row->rc_title );
+ switch( $row->rc_type ) {
+ case RC_EDIT:
+ case RC_NEW:
+ # Edit
+ $dbr =& wfGetDB( DB_MASTER );
+
+ $out = " <edit>\n";
+ $out .= " <title>" . xmlsafe( $title->getPrefixedText() ) . "</title>\n";
+
+ # Get previous edit timestamp
+ if( $row->rc_last_oldid ) {
+ $s = $dbr->selectRow( 'old',
+ array( 'old_timestamp' ),
+ array( 'old_id' => $row->rc_last_oldid ) );
+ $out .= " <lastedit>" . wfTimestamp2ISO8601( $s->old_timestamp ) . "</lastedit>\n";
+ } else {
+ $out .= " <newpage/>\n";
+ }
+
+ if( $row->rc_this_oldid ) {
+ $s = $dbr->selectRow( 'old', array( 'old_id as id','old_timestamp as timestamp',
+ 'old_user as user', 'old_user_text as user_text', 'old_comment as comment',
+ 'old_text as text', 'old_flags as flags' ),
+ array( 'old_id' => $row->rc_this_oldid ) );
+ $out .= revision2xml( $s, true, false );
+ } else {
+ $s = $dbr->selectRow( 'cur', array( 'cur_id as id','cur_timestamp as timestamp','cur_user as user',
+ 'cur_user_text as user_text', 'cur_restrictions as restrictions','cur_comment as comment',
+ 'cur_text as text' ),
+ array( 'cur_id' => $row->rc_cur_id ) );
+ $out .= revision2xml( $s, true, true );
+ }
+ $out .= " </edit>\n";
+ break;
+ case RC_LOG:
+ $dbr =& wfGetDB( DB_MASTER );
+ $s = $dbr->selectRow( 'logging',
+ array( 'log_type', 'log_action', 'log_timestamp', 'log_user',
+ 'log_namespace', 'log_title', 'log_comment' ),
+ array( 'log_timestamp' => $row->rc_timestamp,
+ 'log_user' => $row->rc_user ) );
+ $ts = wfTimestamp2ISO8601( $row->rc_timestamp );
+ $target = Title::MakeTitle( $s->log_namespace, $s->log_title );
+ $out = " <log>\n";
+ $out .= " <type>" . xmlsafe( $s->log_type ) . "</type>\n";
+ $out .= " <action>" . xmlsafe( $s->log_action ) . "</action>\n";
+ $out .= " <timestamp>" . $ts . "</timestamp>\n";
+ $out .= " <contributor><username>" . xmlsafe( $row->rc_user_text ) . "</username></contributor>\n";
+ $out .= " <target>" . xmlsafe( $target->getPrefixedText() ) . "</target>\n";
+ $out .= " <comment>" . xmlsafe( $s->log_comment ) . "</comment>\n";
+ $out .= " </log>\n";
+ break;
+ case RC_MOVE:
+ case RC_MOVE_OVER_REDIRECT:
+ $target = Title::MakeTitle( $row->rc_moved_to_ns, $row->rc_moved_to_title );
+ $out = " <move>\n";
+ $out .= " <title>" . xmlsafe( $title->getPrefixedText() ) . "</title>\n";
+ $out .= " <target>" . xmlsafe( $target->getPrefixedText() ) . "</target>\n";
+ if( $row->rc_type == RC_MOVE_OVER_REDIRECT ) {
+ $out .= " <override/>\n";
+ }
+ $ts = wfTimestamp2ISO8601( $row->rc_timestamp );
+ $out .= " <id>$row->rc_cur_id</id>\n";
+ $out .= " <timestamp>$ts</timestamp>\n";
+ if($row->rc_user_text) {
+ $u = "<username>" . xmlsafe( $row->rc_user_text ) . "</username>";
+ $u .= "<id>$row->rc_user</id>";
+ } else {
+ $u = "<ip>" . xmlsafe( $row->rc_user_text ) . "</ip>";
+ }
+ $out .= " <contributor>$u</contributor>\n";
+ $out .= " </move>\n";
+ }
+ return $out;
+}
+
+
+if( isset( $options['start'] ) ) {
+ $start = wfTimestamp( TS_MW, $options['start'] );
+ dumpReplayLog( $start );
+} else {
+ echo "This is an experimental script to encapsulate data from recent edits.\n";
+ echo "Usage: php dumpReplayLog.php --start=20050118032544\n";
+}
+
+?>
\ No newline at end of file
diff --git a/maintenance/dumpTextPass.php b/maintenance/dumpTextPass.php
new file mode 100644
index 00000000..78367c0b
--- /dev/null
+++ b/maintenance/dumpTextPass.php
@@ -0,0 +1,347 @@
+<?php
+/**
+ * Copyright (C) 2005 Brion Vibber <brion@pobox.com>
+ * http://www.mediawiki.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * @package MediaWiki
+ * @subpackage SpecialPage
+ */
+
+$originalDir = getcwd();
+
+require_once( 'commandLine.inc' );
+require_once( 'SpecialExport.php' );
+require_once( 'maintenance/backup.inc' );
+
+/**
+ * Stream wrapper around 7za filter program.
+ * Required since we can't pass an open file resource to XMLReader->open()
+ * which is used for the text prefetch.
+ */
+class SevenZipStream {
+ var $stream;
+
+ private function stripPath( $path ) {
+ $prefix = 'mediawiki.compress.7z://';
+ return substr( $path, strlen( $prefix ) );
+ }
+
+ function stream_open( $path, $mode, $options, &$opened_path ) {
+ if( $mode{0} == 'r' ) {
+ $options = 'e -bd -so';
+ } elseif( $mode{0} == 'w' ) {
+ $options = 'a -bd -si';
+ } else {
+ return false;
+ }
+ $arg = wfEscapeShellArg( $this->stripPath( $path ) );
+ $command = "7za $options $arg";
+ if( !wfIsWindows() ) {
+ // Suppress the stupid messages on stderr
+ $command .= ' 2>/dev/null';
+ }
+ $this->stream = popen( $command, $mode );
+ return ($this->stream !== false);
+ }
+
+ function url_stat( $path, $flags ) {
+ return stat( $this->stripPath( $path ) );
+ }
+
+ // This is all so lame; there should be a default class we can extend
+
+ function stream_close() {
+ return fclose( $this->stream );
+ }
+
+ function stream_flush() {
+ return fflush( $this->stream );
+ }
+
+ function stream_read( $count ) {
+ return fread( $this->stream, $count );
+ }
+
+ function stream_write( $data ) {
+ return fwrite( $this->stream, $data );
+ }
+
+ function stream_tell() {
+ return ftell( $this->stream );
+ }
+
+ function stream_eof() {
+ return feof( $this->stream );
+ }
+
+ function stream_seek( $offset, $whence ) {
+ return fseek( $this->stream, $offset, $whence );
+ }
+}
+stream_wrapper_register( 'mediawiki.compress.7z', 'SevenZipStream' );
+
+
+class TextPassDumper extends BackupDumper {
+ var $prefetch = null;
+ var $input = "php://stdin";
+ var $history = MW_EXPORT_FULL;
+ var $fetchCount = 0;
+ var $prefetchCount = 0;
+
+ function dump() {
+ # This shouldn't happen if on console... ;)
+ header( 'Content-type: text/html; charset=UTF-8' );
+
+ # Notice messages will foul up your XML output even if they're
+ # relatively harmless.
+// ini_set( 'display_errors', false );
+
+ $this->initProgress( $this->history );
+
+ $this->db =& $this->backupDb();
+
+ $this->egress = new ExportProgressFilter( $this->sink, $this );
+
+ $input = fopen( $this->input, "rt" );
+ $result = $this->readDump( $input );
+
+ if( WikiError::isError( $result ) ) {
+ wfDie( $result->getMessage() );
+ }
+
+ $this->report( true );
+ }
+
+ function processOption( $opt, $val, $param ) {
+ $url = $this->processFileOpt( $val, $param );
+
+ switch( $opt ) {
+ case 'prefetch':
+ require_once 'maintenance/backupPrefetch.inc';
+ $this->prefetch = new BaseDump( $url );
+ break;
+ case 'stub':
+ $this->input = $url;
+ break;
+ case 'current':
+ $this->history = MW_EXPORT_CURRENT;
+ break;
+ case 'full':
+ $this->history = MW_EXPORT_FULL;
+ break;
+ }
+ }
+
+ function processFileOpt( $val, $param ) {
+ switch( $val ) {
+ case "file":
+ return $param;
+ case "gzip":
+ return "compress.zlib://$param";
+ case "bzip2":
+ return "compress.bzip2://$param";
+ case "7zip":
+ return "mediawiki.compress.7z://$param";
+ default:
+ return $val;
+ }
+ }
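+	// For example (illustrative; the filename is hypothetical): an option value
+	// of "gzip:history.xml.gz" resolves to "compress.zlib://history.xml.gz".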
+
+ /**
+ * Overridden to include prefetch ratio if enabled.
+ */
+ function showReport() {
+ if( !$this->prefetch ) {
+ return parent::showReport();
+ }
+
+ if( $this->reporting ) {
+ $delta = wfTime() - $this->startTime;
+ $now = wfTimestamp( TS_DB );
+ if( $delta ) {
+ $rate = $this->pageCount / $delta;
+ $revrate = $this->revCount / $delta;
+ $portion = $this->revCount / $this->maxCount;
+ $eta = $this->startTime + $delta / $portion;
+ $etats = wfTimestamp( TS_DB, intval( $eta ) );
+ $fetchrate = 100.0 * $this->prefetchCount / $this->fetchCount;
+ } else {
+ $rate = '-';
+ $revrate = '-';
+ $etats = '-';
+ $fetchrate = '-';
+ }
+ global $wgDBname;
+ $this->progress( sprintf( "%s: %s %d pages (%0.3f/sec), %d revs (%0.3f/sec), %0.1f%% prefetched, ETA %s [max %d]",
+ $now, $wgDBname, $this->pageCount, $rate, $this->revCount, $revrate, $fetchrate, $etats, $this->maxCount ) );
+ }
+ }
+
+ function readDump( $input ) {
+ $this->buffer = "";
+ $this->openElement = false;
+ $this->atStart = true;
+ $this->state = "";
+ $this->lastName = "";
+ $this->thisPage = 0;
+ $this->thisRev = 0;
+
+ $parser = xml_parser_create( "UTF-8" );
+ xml_parser_set_option( $parser, XML_OPTION_CASE_FOLDING, false );
+
+ xml_set_element_handler( $parser, array( &$this, 'startElement' ), array( &$this, 'endElement' ) );
+ xml_set_character_data_handler( $parser, array( &$this, 'characterData' ) );
+
+ $offset = 0; // for context extraction on error reporting
+ $bufferSize = 512 * 1024;
+ do {
+ $chunk = fread( $input, $bufferSize );
+ if( !xml_parse( $parser, $chunk, feof( $input ) ) ) {
+				wfDebug( "TextPassDumper::readDump encountered XML parsing error\n" );
+ return new WikiXmlError( $parser, 'XML import parse failure', $chunk, $offset );
+ }
+ $offset += strlen( $chunk );
+ } while( $chunk !== false && !feof( $input ) );
+ xml_parser_free( $parser );
+
+ return true;
+ }
+
+ function getText( $id ) {
+ $this->fetchCount++;
+ if( isset( $this->prefetch ) ) {
+ $text = $this->prefetch->prefetch( $this->thisPage, $this->thisRev );
+ if( $text === null ) {
+ // Entry missing from prefetch dump
+ } elseif( $text === "" ) {
+ // Blank entries may indicate that the prior dump was broken.
+ // To be safe, reload it.
+ } else {
+ $this->prefetchCount++;
+ return $text;
+ }
+ }
+ $id = intval( $id );
+ $row = $this->db->selectRow( 'text',
+ array( 'old_text', 'old_flags' ),
+ array( 'old_id' => $id ),
+ 'TextPassDumper::getText' );
+ $text = Revision::getRevisionText( $row );
+ $stripped = str_replace( "\r", "", $text );
+ $normalized = UtfNormal::cleanUp( $stripped );
+ return $normalized;
+ }
+
+ function startElement( $parser, $name, $attribs ) {
+ $this->clearOpenElement( null );
+ $this->lastName = $name;
+
+ if( $name == 'revision' ) {
+ $this->state = $name;
+ $this->egress->writeOpenPage( null, $this->buffer );
+ $this->buffer = "";
+ } elseif( $name == 'page' ) {
+ $this->state = $name;
+ if( $this->atStart ) {
+ $this->egress->writeOpenStream( $this->buffer );
+ $this->buffer = "";
+ $this->atStart = false;
+ }
+ }
+
+ if( $name == "text" && isset( $attribs['id'] ) ) {
+ $text = $this->getText( $attribs['id'] );
+ $this->openElement = array( $name, array( 'xml:space' => 'preserve' ) );
+ if( strlen( $text ) > 0 ) {
+ $this->characterData( $parser, $text );
+ }
+ } else {
+ $this->openElement = array( $name, $attribs );
+ }
+ }
+
+ function endElement( $parser, $name ) {
+ if( $this->openElement ) {
+ $this->clearOpenElement( "" );
+ } else {
+ $this->buffer .= "</$name>";
+ }
+
+ if( $name == 'revision' ) {
+ $this->egress->writeRevision( null, $this->buffer );
+ $this->buffer = "";
+ $this->thisRev = "";
+ } elseif( $name == 'page' ) {
+ $this->egress->writeClosePage( $this->buffer );
+ $this->buffer = "";
+ $this->thisPage = "";
+ } elseif( $name == 'mediawiki' ) {
+ $this->egress->writeCloseStream( $this->buffer );
+ $this->buffer = "";
+ }
+ }
+
+ function characterData( $parser, $data ) {
+ $this->clearOpenElement( null );
+ if( $this->lastName == "id" ) {
+ if( $this->state == "revision" ) {
+ $this->thisRev .= $data;
+ } elseif( $this->state == "page" ) {
+ $this->thisPage .= $data;
+ }
+ }
+ $this->buffer .= htmlspecialchars( $data );
+ }
+
+ function clearOpenElement( $style ) {
+ if( $this->openElement ) {
+ $this->buffer .= wfElement( $this->openElement[0], $this->openElement[1], $style );
+ $this->openElement = false;
+ }
+ }
+}
+
+
+$dumper = new TextPassDumper( $argv );
+
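+// Note: the usage text below is unreachable as written; the condition is
+// hardcoded to true, so the help text serves only as inline documentation.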
+if( true ) {
+ $dumper->dump();
+} else {
+ $dumper->progress( <<<END
+This script postprocesses XML dumps from dumpBackup.php to add
+page text which was stubbed out (using --stub).
+
+XML input is accepted on stdin.
+XML output is sent to stdout; progress reports are sent to stderr.
+
+Usage: php dumpTextPass.php [<options>]
+Options:
+ --stub=<type>:<file> To load a compressed stub dump instead of stdin
+ --prefetch=<type>:<file> Use a prior dump file as a text source, to save
+ pressure on the database.
+ (Requires PHP 5.0+ and the XMLReader PECL extension)
+ --quiet Don't dump status reports to stderr.
+ --report=n Report position and speed after every n pages processed.
+ (Default: 100)
+ --server=h Force reading from MySQL server h
+ --current Base ETA on number of pages in database instead of all revisions
+END
+);
+}
+
+?>
diff --git a/maintenance/duplicatetrans.php b/maintenance/duplicatetrans.php
new file mode 100644
index 00000000..89d9cff1
--- /dev/null
+++ b/maintenance/duplicatetrans.php
@@ -0,0 +1,29 @@
+<?php
+/**
+ * Prints out messages that are the same as the message with the corresponding
+ * key in the Language.php file
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+require_once('commandLine.inc');
+
+if ( 'en' == $wgLanguageCode ) {
+	print "Currently selected language is English. Cannot check translations.\n";
+ exit();
+}
+
+$count = $total = 0;
+$msgarray = 'wgAllMessages' . ucfirst( $wgLanguageCode );
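+// e.g. for $wgLanguageCode 'de', this resolves to the $wgAllMessagesDe array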
+
+foreach ( $$msgarray as $code => $msg ) {
+ ++$total;
+ if ( @$wgAllMessagesEn[$code] == $msg ) {
+ echo "* $code\n";
+ ++$count;
+ }
+}
+
+echo "{$count} messages of {$total} are duplicates\n";
+?>
diff --git a/maintenance/entities2literals.pl b/maintenance/entities2literals.pl
new file mode 100644
index 00000000..dd47f6bb
--- /dev/null
+++ b/maintenance/entities2literals.pl
@@ -0,0 +1,276 @@
+#!/usr/bin/env perl
+# Takes STDIN and converts hexadecimal, decimal and named HTML
+# entities to their respective literals.
+#
+# Usage: perl entities2literals.pl < file_to_convert [> outfile]
+# Reference: http://www.w3.org/TR/REC-html40/sgml/entities.html
+# Copyright 2005 Ævar Arnfjörð Bjarmason <avarab@gmail.com> No rights reserved
+
+use encoding 'utf8';
+use strict;
+
+my $file = join '', <>; # slurp; the input lines already carry their newlines
+
+$file =~ s/&#(\d+);/chr $1/eg;
+$file =~ s/&#x([0-9a-fA-F]+);/chr hex $1/eg;
+
+while (<DATA>) {
+ chomp;
+ my ($number, $entity) = split / +/;
+ $file =~ s/&$entity;/chr $number/eg;
+}
+print $file;
+
+__DATA__
+34 quot
+38 amp
+60 lt
+62 gt
+160 nbsp
+161 iexcl
+162 cent
+163 pound
+164 curren
+165 yen
+166 brvbar
+167 sect
+168 uml
+169 copy
+170 ordf
+171 laquo
+172 not
+173 shy
+174 reg
+175 macr
+176 deg
+177 plusmn
+178 sup2
+179 sup3
+180 acute
+181 micro
+182 para
+183 middot
+184 cedil
+185 sup1
+186 ordm
+187 raquo
+188 frac14
+189 frac12
+190 frac34
+191 iquest
+192 Agrave
+193 Aacute
+194 Acirc
+195 Atilde
+196 Auml
+197 Aring
+198 AElig
+199 Ccedil
+200 Egrave
+201 Eacute
+202 Ecirc
+203 Euml
+204 Igrave
+205 Iacute
+206 Icirc
+207 Iuml
+208 ETH
+209 Ntilde
+210 Ograve
+211 Oacute
+212 Ocirc
+213 Otilde
+214 Ouml
+215 times
+216 Oslash
+217 Ugrave
+218 Uacute
+219 Ucirc
+220 Uuml
+221 Yacute
+222 THORN
+223 szlig
+224 agrave
+225 aacute
+226 acirc
+227 atilde
+228 auml
+229 aring
+230 aelig
+231 ccedil
+232 egrave
+233 eacute
+234 ecirc
+235 euml
+236 igrave
+237 iacute
+238 icirc
+239 iuml
+240 eth
+241 ntilde
+242 ograve
+243 oacute
+244 ocirc
+245 otilde
+246 ouml
+247 divide
+248 oslash
+249 ugrave
+250 uacute
+251 ucirc
+252 uuml
+253 yacute
+254 thorn
+255 yuml
+338 OElig
+339 oelig
+352 Scaron
+353 scaron
+376 Yuml
+402 fnof
+710 circ
+732 tilde
+913 Alpha
+914 Beta
+915 Gamma
+916 Delta
+917 Epsilon
+918 Zeta
+919 Eta
+920 Theta
+921 Iota
+922 Kappa
+923 Lambda
+924 Mu
+925 Nu
+926 Xi
+927 Omicron
+928 Pi
+929 Rho
+931 Sigma
+932 Tau
+933 Upsilon
+934 Phi
+935 Chi
+936 Psi
+937 Omega
+945 alpha
+946 beta
+947 gamma
+948 delta
+949 epsilon
+950 zeta
+951 eta
+952 theta
+953 iota
+954 kappa
+955 lambda
+956 mu
+957 nu
+958 xi
+959 omicron
+960 pi
+961 rho
+962 sigmaf
+963 sigma
+964 tau
+965 upsilon
+966 phi
+967 chi
+968 psi
+969 omega
+977 thetasym
+978 upsih
+982 piv
+8194 ensp
+8195 emsp
+8201 thinsp
+8204 zwnj
+8205 zwj
+8206 lrm
+8207 rlm
+8211 ndash
+8212 mdash
+8216 lsquo
+8217 rsquo
+8218 sbquo
+8220 ldquo
+8221 rdquo
+8222 bdquo
+8224 dagger
+8225 Dagger
+8226 bull
+8230 hellip
+8240 permil
+8242 prime
+8243 Prime
+8249 lsaquo
+8250 rsaquo
+8254 oline
+8260 frasl
+8364 euro
+8465 image
+8472 weierp
+8476 real
+8482 trade
+8501 alefsym
+8592 larr
+8593 uarr
+8594 rarr
+8595 darr
+8596 harr
+8629 crarr
+8656 lArr
+8657 uArr
+8658 rArr
+8659 dArr
+8660 hArr
+8704 forall
+8706 part
+8707 exist
+8709 empty
+8711 nabla
+8712 isin
+8713 notin
+8715 ni
+8719 prod
+8721 sum
+8722 minus
+8727 lowast
+8730 radic
+8733 prop
+8734 infin
+8736 ang
+8743 and
+8744 or
+8745 cap
+8746 cup
+8747 int
+8756 there4
+8764 sim
+8773 cong
+8776 asymp
+8800 ne
+8801 equiv
+8804 le
+8805 ge
+8834 sub
+8835 sup
+8836 nsub
+8838 sube
+8839 supe
+8853 oplus
+8855 otimes
+8869 perp
+8901 sdot
+8968 lceil
+8969 rceil
+8970 lfloor
+8971 rfloor
+9001 lang
+9002 rang
+9674 loz
+9824 spades
+9827 clubs
+9829 hearts
+9830 diams
diff --git a/maintenance/eval.php b/maintenance/eval.php
new file mode 100644
index 00000000..4e477f4c
--- /dev/null
+++ b/maintenance/eval.php
@@ -0,0 +1,63 @@
+<?php
+/**
+ * PHP lacks an interactive mode, but one can be very helpful when debugging.
+ * This script lets a command-line user start up the wiki engine and then poke
+ * about by issuing PHP commands directly.
+ *
+ * Unlike e.g. Python, you need to use a 'return' statement explicitly for the
+ * interactive shell to print out the value of the expression. Multiple lines
+ * are evaluated separately, so blocks need to be input without a line break.
+ * Fatal errors such as use of undeclared functions can kill the shell.
+ *
+ * To get decent line editing behavior, you should compile PHP with support
+ * for GNU readline (pass --with-readline to configure).
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
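+// An illustrative session (assumes the standard 'mainpage' message exists):
+//   > return wfMsg( 'mainpage' );
+//   Main Page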
+
+$wgForceLoadBalancing = (getenv('MW_BALANCE') ? true : false);
+$wgUseNormalUser = (getenv('MW_WIKIUSER') ? true : false);
+if (getenv('MW_PROFILING')) {
+ define('MW_CMDLINE_CALLBACK', 'wfSetProfiling');
+}
+function wfSetProfiling() { $GLOBALS['wgProfiling'] = true; }
+
+$optionsWithArgs = array( 'd' );
+
+/** */
+require_once( "commandLine.inc" );
+
+if ( isset( $options['d'] ) ) {
+ $d = $options['d'];
+ if ( $d > 0 ) {
+ $wgDebugLogFile = '/dev/stdout';
+ }
+ if ( $d > 1 ) {
+ foreach ( $wgLoadBalancer->mServers as $i => $server ) {
+ $wgLoadBalancer->mServers[$i]['flags'] |= DBO_DEBUG;
+ }
+ }
+ if ( $d > 2 ) {
+ $wgDebugFunctionEntry = true;
+ }
+}
+
+
+while ( ( $line = readconsole( '> ' ) ) !== false ) {
+ $val = eval( $line . ";" );
+ if( is_null( $val ) ) {
+ echo "\n";
+ } elseif( is_string( $val ) || is_numeric( $val ) ) {
+ echo "$val\n";
+ } else {
+ var_dump( $val );
+ }
+ if ( function_exists( "readline_add_history" ) ) {
+ readline_add_history( $line );
+ }
+}
+
+print "\n";
+
+?>
diff --git a/maintenance/fetchInterwiki.pl b/maintenance/fetchInterwiki.pl
new file mode 100644
index 00000000..cb56a6df
--- /dev/null
+++ b/maintenance/fetchInterwiki.pl
@@ -0,0 +1,102 @@
+#!/usr/bin/env perl
+# Copyright (C) 2005 Ævar Arnfjörð Bjarmason
+use strict;
+use warnings;
+use Socket;
+
+# Conf
+my $map = &get(&url('http://usemod.com/intermap.txt'));
+
+# --- #
+my $cont;
+my @map = split /\n/, $map;
+
+$cont .= '<?php
+# Note: this file is generated by maintenance/fetchInterwiki.pl
+# Edit and rerun that script rather than modifying this directly.
+
+/* private */ $wgValidInterwikis = array(
+';
+
+$cont .= "\t# The usemod interwiki map\n";
+for (my $i=0;$i<=$#map;++$i) {
+ my ($name, $url) = $map[$i] =~ m#^([^ ]+) (.+)#i;
+ $cont .= "\t'$name' => '$url\$1',\n";
+}
+
+my @iso = qw(
+aa ab af als am ar as ay az ba be bg bh bi bn bo bs ca chr co cs csb cy da de dk:da dz el en eo
+es et eu fa fi fj fo fr fy ga gd gl gn gu gv ha he hi hr hu hy ia id ik io is it iu ja jv ka kk
+kl km kn ko ks ku ky la lo lt lv mg mi mk ml mn mo mr ms my na nah nb nds ne nl no oc om or pa
+pl ps pt qu rm rn ro ru rw sa sd sg sh si sk sl sm sn so sq sr ss st su sv sw ta te tg th ti tk
+tl tn to tp tpi tr ts tt tw ug uk ur uz vi vo wa wo xh yi yo za zh zh-cn zh-tw zu);
+
+$cont .= '
+ # Some custom additions:
+ "ReVo" => "http://purl.org/NET/voko/revo/art/$1.html",
+ # eg [[ReVo:cerami]], [[ReVo:astero]] - note X-sensitive!
+ "EcheI" => "http://www.ikso.net/cgi-bin/wiki.pl?$1",
+ "E\\xc4\\x89eI" => "http://www.ikso.net/cgi-bin/wiki.pl?$1",
+ "UnuMondo" => "http://unumondo.com/cgi-bin/wiki.pl?$1", # X-sensitive!
+ "JEFO" => "http://esperanto.jeunes.free.fr/vikio/index.php?$1",
+ "PMEG" => "http://www.bertilow.com/pmeg/$1.php",
+ # ekz [[PMEG:gramatiko/kunligaj vortetoj/au]]
+ "EnciclopediaLibre" => "http://enciclopedia.us.es/wiki.phtml?title=$1",
+
+ # Wikipedia-specific stuff:
+ # Special cases
+ "w" => "http://www.wikipedia.org/wiki/$1",
+ "m" => "http://meta.wikipedia.org/wiki/$1",
+ "meta" => "http://meta.wikipedia.org/wiki/$1",
+ "sep11" => "http://sep11.wikipedia.org/wiki/$1",
+ "simple"=> "http://simple.wikipedia.com/wiki.cgi?$1",
+ "wiktionary" => "http://wiktionary.wikipedia.org/wiki/$1",
+ "PageHistory" => "http://www.wikipedia.org/w/wiki.phtml?title=$1&action=history",
+ "UserContributions" => "http://www.wikipedia.org/w/wiki.phtml?title=Special:Contributions&target=$1",
+ "BackLinks" => "http://www.wikipedia.org/w/wiki.phtml?title=Special:Whatlinkshere&target=$1",
+
+ # ISO 639 2-letter language codes
+';
+
+for(my $i=0; $i<=$#iso;++$i) {
+ my @arr = split /:/, $iso[$i];
+ $cont .= "\t";
+ $cont .= "'$arr[0]' => 'http://";
+
+ if ($arr[1]) {
+ $cont .= $arr[1];
+ } else {
+ $cont .= $arr[0];
+ }
+ $cont .= ".wikipedia.org/wiki/\$1',\n";
+}
+
+$cont .= '
+);
+?>
+';
+
+open IW, ">Interwiki.php";
+print IW $cont;
+close IW;
+
+sub get {
+ my ($host, $url) = @_;
+ my $cont;
+ my $eat;
+
+ my $proto = getprotobyname('tcp');
+ socket(Socket, AF_INET, SOCK_STREAM, $proto);
+ my $iaddr = inet_aton("$host");
+ my $port = getservbyname('http', 'tcp');
+ my $sin = sockaddr_in($port, $iaddr);
+ connect(Socket, $sin);
+ send Socket, "GET $url HTTP/1.0\r\nHost: $host\r\n\r\n",0;
+ while (<Socket>) {
+ $cont .= $_ if $eat; # mmm, food
+ ++$eat if ($_ =~ /^(\n|\r\n|)$/);
+ }
+ return $cont;
+}
+
+sub url {my ($server, $path) = $_[0] =~ m#.*(?=//)//([^/]*)(.*)#g;}
diff --git a/maintenance/findhooks.php b/maintenance/findhooks.php
new file mode 100644
index 00000000..4f446f2b
--- /dev/null
+++ b/maintenance/findhooks.php
@@ -0,0 +1,93 @@
+<?php
+/**
+ * Simple script that tries to find documented hooks and hooks actually used
+ * in the code, and shows what's missing.
+ *
+ * This script assumes that:
+ * - hook names in hooks.txt are at the beginning of a line and single quoted.
+ * - hook names in code are the first parameter of wfRunHooks.
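+ *   (e.g. a call like wfRunHooks( 'ArticleSave', ... ) yields the hook name 'ArticleSave')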
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ *
+ * @author Ashar Voultoiz <hashar@altern.org>
+ * @copyright Copyright © Ashar voultoiz
+ * @license http://www.gnu.org/copyleft/gpl.html GNU General Public Licence 2.0 or later
+ */
+
+/** This is a command line script */
+include('commandLine.inc');
+
+
+# GLOBALS
+
+$doc = $IP . '/docs/hooks.txt';
+$pathinc = $IP . '/includes/';
+
+
+# FUNCTIONS
+
+/**
+ * @return array of documented hooks
+ */
+function getHooksFromDoc() {
+ global $doc;
+ $content = file_get_contents( $doc );
+ preg_match_all( "/\n'(.*?)'/", $content, $m);
+ return $m[1];
+}
+
+/**
+ * Get hooks from a php file
+ * @param $file Full filename to the PHP file.
+ * @return array of hooks found.
+ */
+function getHooksFromFile( $file ) {
+ $content = file_get_contents( $file );
+ preg_match_all( "/wfRunHooks\(\s*\'(.*?)\'/", $content, $m);
+ return $m[1];
+}
+
+/**
+ * Get hooks from the source code.
+ * @param $path Directory where the include files can be found
+ * @return array of hooks found.
+ */
+function getHooksFromPath( $path ) {
+ $hooks = array();
+ if( $dh = opendir($path) ) {
+ while(($file = readdir($dh)) !== false) {
+ if( filetype($path.$file) == 'file' ) {
+ $hooks = array_merge( $hooks, getHooksFromFile($path.$file) );
+ }
+ }
+ closedir($dh);
+ }
+ return $hooks;
+}
+
+/**
+ * Nicely output the array
+ * @param $msg A message to show before the value
+ * @param $arr An array
+ * @param $sort Boolean : whether to sort the array (Default: true)
+ */
+function printArray( $msg, $arr, $sort = true ) {
+ if($sort) asort($arr);
+ foreach($arr as $v) print "$msg: $v\n";
+}
+
+
+# MAIN
+
+$documented = getHooksFromDoc($doc);
+$potential = getHooksFromPath($pathinc);
+
+$todo = array_diff($potential, $documented);
+$deprecated = array_diff($documented, $potential);
+
+// let's show the results:
+printArray('undocumented', $todo );
+printArray('not found', $deprecated );
+
+?>
diff --git a/maintenance/fixSlaveDesync.php b/maintenance/fixSlaveDesync.php
new file mode 100644
index 00000000..e97f96c9
--- /dev/null
+++ b/maintenance/fixSlaveDesync.php
@@ -0,0 +1,100 @@
+<?php
+
+$wgUseRootUser = true;
+require_once( 'commandLine.inc' );
+
+//$wgDebugLogFile = '/dev/stdout';
+
+$slaveIndexes = array();
+for ( $i = 1; $i < count( $wgDBservers ); $i++ ) {
+ if ( $wgLoadBalancer->isNonZeroLoad( $i ) ) {
+ $slaveIndexes[] = $i;
+ }
+}
+/*
+foreach ( $wgLoadBalancer->mServers as $i => $server ) {
+ $wgLoadBalancer->mServers[$i]['flags'] |= DBO_DEBUG;
+}*/
+$reportingInterval = 1000;
+
+if ( isset( $args[0] ) ) {
+ desyncFixPage( $args[0] );
+} else {
+ $dbw =& wfGetDB( DB_MASTER );
+	$maxPage = $dbw->selectField( 'page', 'MAX(page_id)', false, 'fixSlaveDesync.php' );
+ for ( $i=1; $i <= $maxPage; $i++ ) {
+ desyncFixPage( $i );
+ if ( !($i % $reportingInterval) ) {
+ print "$i\n";
+ }
+ }
+}
+
+function desyncFixPage( $pageID ) {
+ global $slaveIndexes;
+ $fname = 'desyncFixPage';
+
+ # Check for a corrupted page_latest
+ $dbw =& wfGetDB( DB_MASTER );
+ $realLatest = $dbw->selectField( 'page', 'page_latest', array( 'page_id' => $pageID ), $fname );
+ $found = false;
+ foreach ( $slaveIndexes as $i ) {
+ $db =& wfGetDB( $i );
+ $latest = $db->selectField( 'page', 'page_latest', array( 'page_id' => $pageID ), $fname );
+ $max = $db->selectField( 'revision', 'MAX(rev_id)', false, $fname );
+ if ( $latest != $realLatest && $realLatest < $max ) {
+ print "page_latest corrupted in page $pageID, server $i\n";
+ $found = true;
+ break;
+ }
+ }
+ if ( !$found ) {
+ return;
+ }
+
+ # Find the missing revision
+ $res = $dbw->select( 'revision', array( 'rev_id' ), array( 'rev_page' => $pageID ), $fname );
+ $masterIDs = array();
+ while ( $row = $dbw->fetchObject( $res ) ) {
+ $masterIDs[] = $row->rev_id;
+ }
+ $dbw->freeResult( $res );
+
+ $res = $db->select( 'revision', array( 'rev_id' ), array( 'rev_page' => $pageID ), $fname );
+ $slaveIDs = array();
+ while ( $row = $db->fetchObject( $res ) ) {
+ $slaveIDs[] = $row->rev_id;
+ }
+ $db->freeResult( $res );
+ $missingIDs = array_diff( $masterIDs, $slaveIDs );
+
+ if ( count( $missingIDs ) ) {
+ print "Found " . count( $missingIDs ) . " missing revision(s), copying from master... ";
+ foreach ( $missingIDs as $rid ) {
+ print "$rid ";
+ # Revision
+ $row = $dbw->selectRow( 'revision', '*', array( 'rev_id' => $rid ), $fname );
+ foreach ( $slaveIndexes as $i ) {
+ $db =& wfGetDB( $i );
+ $db->insert( 'revision', get_object_vars( $row ), $fname, 'IGNORE' );
+ }
+
+ # Text
+ $row = $dbw->selectRow( 'text', '*', array( 'old_id' => $row->rev_text_id ), $fname );
+ foreach ( $slaveIndexes as $i ) {
+ $db =& wfGetDB( $i );
+ $db->insert( 'text', get_object_vars( $row ), $fname, 'IGNORE' );
+ }
+ }
+ print "done\n";
+ }
+
+ print "Fixing page_latest... ";
+ foreach ( $slaveIndexes as $i ) {
+ $db =& wfGetDB( $i );
+ $db->update( 'page', array( 'page_latest' => $realLatest ), array( 'page_id' => $pageID ), $fname );
+ }
+ print "done\n";
+}
+
+?>
diff --git a/maintenance/fixTimestamps.php b/maintenance/fixTimestamps.php
new file mode 100644
index 00000000..784e35cd
--- /dev/null
+++ b/maintenance/fixTimestamps.php
@@ -0,0 +1,104 @@
+<?php
+
+/**
+ * This script fixes timestamp corruption caused by one or more webservers
+ * temporarily being set to the wrong time. The time offset must be known and
+ * consistent. Start and end times (in 14-character format) restrict the search,
+ * and must bracket the damage. There must be a majority of good timestamps in the
+ * search period.
+ */
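+// Illustrative invocation (hypothetical times): if the clock ran one hour fast
+// during January 2005, the damage could be repaired with:
+//   php fixTimestamps.php 1 20050101000000 20050201000000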
+
+require_once( 'commandLine.inc' );
+
+if ( count( $args ) < 3 ) {
+ echo "Usage: php fixTimestamps.php <offset in hours> <start time> <end time>\n";
+ exit(1);
+}
+
+$offset = $args[0] * 3600;
+$start = $args[1];
+$end = $args[2];
+$fname = 'fixTimestamps.php';
+$grace = 60; // maximum normal clock offset
+
+# Find bounding revision IDs
+$dbw =& wfGetDB( DB_MASTER );
+$revisionTable = $dbw->tableName( 'revision' );
+$res = $dbw->query( "SELECT MIN(rev_id) as minrev, MAX(rev_id) as maxrev FROM $revisionTable " .
+ "WHERE rev_timestamp BETWEEN '{$start}' AND '{$end}'", $fname );
+$row = $dbw->fetchObject( $res );
+
+if ( is_null( $row->minrev ) ) {
+ echo "No revisions in search period.\n";
+ exit(0);
+}
+
+$minRev = $row->minrev;
+$maxRev = $row->maxrev;
+
+# Select all timestamps and IDs
+$sql = "SELECT rev_id, rev_timestamp FROM $revisionTable " .
+ "WHERE rev_id BETWEEN $minRev AND $maxRev";
+if ( $offset > 0 ) {
+ $sql .= " ORDER BY rev_id DESC";
+ $expectedSign = -1;
+} else {
+ $expectedSign = 1;
+}
+
+$res = $dbw->query( $sql, $fname );
+
+$lastNormal = 0;
+$badRevs = array();
+$numGoodRevs = 0;
+
+while ( $row = $dbw->fetchObject( $res ) ) {
+ $timestamp = wfTimestamp( TS_UNIX, $row->rev_timestamp );
+ $delta = $timestamp - $lastNormal;
+ $sign = $delta == 0 ? 0 : $delta / abs( $delta );
+ if ( $sign == 0 || $sign == $expectedSign ) {
+ // Monotonic change
+ $lastNormal = $timestamp;
+ ++ $numGoodRevs;
+ continue;
+ } elseif ( abs( $delta ) <= $grace ) {
+ // Non-monotonic change within grace interval
+ ++ $numGoodRevs;
+ continue;
+ } else {
+ // Non-monotonic change larger than grace interval
+ $badRevs[] = $row->rev_id;
+ }
+}
+$dbw->freeResult( $res );
+
+$numBadRevs = count( $badRevs );
+if ( $numBadRevs > $numGoodRevs ) {
+ echo
+"The majority of revisions in the search interval are marked as bad.
+
+Are you sure the offset ($offset) has the right sign? Positive means the clock
+was incorrectly set forward, negative means the clock was incorrectly set back.
+
+If the offset is right, then increase the search interval until there are enough
+good revisions to provide a majority reference.
+";
+
+ exit(1);
+} elseif ( $numBadRevs == 0 ) {
+ echo "No bad revisions found.\n";
+ exit(0);
+}
+
+printf( "Fixing %d revisions (%.2f%% of revisions in search interval)\n",
+ $numBadRevs, $numBadRevs / ($numGoodRevs + $numBadRevs) * 100 );
+
+$fixup = -$offset;
+$sql = "UPDATE $revisionTable " .
+ "SET rev_timestamp=DATE_FORMAT(DATE_ADD(rev_timestamp, INTERVAL $fixup SECOND), '%Y%m%d%H%i%s') " .
+ "WHERE rev_id IN (" . $dbw->makeList( $badRevs ) . ')';
+//echo "$sql\n";
+$dbw->query( $sql, $fname );
+echo "Done\n";
+
+?>
diff --git a/maintenance/fixUserRegistration.php b/maintenance/fixUserRegistration.php
new file mode 100644
index 00000000..af8a68c2
--- /dev/null
+++ b/maintenance/fixUserRegistration.php
@@ -0,0 +1,31 @@
+<?php
+/**
+ * Fix the user_registration field.
+ * In particular, for values which are NULL, set them to the date of the first edit
+ */
+
+require_once( 'commandLine.inc' );
+
+$fname = 'fixUserRegistration.php';
+
+$dbr =& wfGetDB( DB_SLAVE );
+$dbw =& wfGetDB( DB_MASTER );
+
+// Get user IDs which need fixing
+$res = $dbr->select( 'user', 'user_id', 'user_registration IS NULL', $fname );
+
+while ( $row = $dbr->fetchObject( $res ) ) {
+ $id = $row->user_id;
+ // Get first edit time
+ $timestamp = $dbr->selectField( 'revision', 'MIN(rev_timestamp)', array( 'rev_user' => $id ), $fname );
+ // Update
+ if ( !empty( $timestamp ) ) {
+ $dbw->update( 'user', array( 'user_registration' => $timestamp ), array( 'user_id' => $id ), $fname );
+ print "$id $timestamp\n";
+ } else {
+ print "$id NULL\n";
+ }
+}
+print "\n";
+
+?>
diff --git a/maintenance/generateSitemap.php b/maintenance/generateSitemap.php
new file mode 100644
index 00000000..2cf8312a
--- /dev/null
+++ b/maintenance/generateSitemap.php
@@ -0,0 +1,463 @@
+<?php
+define( 'GS_MAIN', -2 );
+define( 'GS_TALK', -1 );
+/**
+ * Creates a Google sitemap for the site
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ *
+ * @copyright Copyright © 2005, Ævar Arnfjörð Bjarmason
+ * @copyright Copyright © 2005, Jens Frank <jeluf@gmx.de>
+ * @copyright Copyright © 2005, Brion Vibber <brion@pobox.com>
+ *
+ * @link http://www.google.com/webmasters/sitemaps/docs/en/about.html
+ * @link http://www.google.com/schemas/sitemap/0.84/sitemap.xsd
+ *
+ * @license http://www.gnu.org/copyleft/gpl.html GNU General Public License 2.0 or later
+ */
+
+class GenerateSitemap {
+ /**
+	 * The maximum number of URLs in a sitemap file
+ *
+ * @link http://www.google.com/schemas/sitemap/0.84/sitemap.xsd
+ *
+ * @var int
+ */
+ var $url_limit;
+
+ /**
+ * The maximum size of a sitemap file
+ *
+ * @link http://www.google.com/webmasters/sitemaps/docs/en/protocol.html#faq_sitemap_size
+ *
+ * @var int
+ */
+ var $size_limit;
+
+ /**
+ * The path to prepend to the filename
+ *
+ * @var string
+ */
+ var $fspath;
+
+ /**
+ * The path to append to the domain name
+ *
+ * @var string
+ */
+ var $path;
+
+ /**
+ * Whether or not to use compression
+ *
+ * @var bool
+ */
+ var $compress;
+
+ /**
+	 * Byte lengths of a sitemap file's opening XML, a worst-case URL entry,
+	 * and its closing XML; used to keep each file under the size limit
+ *
+ * @var array
+ */
+ var $limit = array();
+
+ /**
+ * Key => value entries of namespaces and their priorities
+ *
+ * @var array
+ */
+ var $priorities = array(
+ // Custom main namespaces
+ GS_MAIN => '0.5',
+		// Custom talk namespaces
+ GS_TALK => '0.1',
+ // MediaWiki standard namespaces
+ NS_MAIN => '1.0',
+ NS_TALK => '0.1',
+ NS_USER => '0.5',
+ NS_USER_TALK => '0.1',
+ NS_PROJECT => '0.5',
+ NS_PROJECT_TALK => '0.1',
+ NS_IMAGE => '0.5',
+ NS_IMAGE_TALK => '0.1',
+ NS_MEDIAWIKI => '0.0',
+ NS_MEDIAWIKI_TALK => '0.1',
+ NS_TEMPLATE => '0.0',
+ NS_TEMPLATE_TALK => '0.1',
+ NS_HELP => '0.5',
+ NS_HELP_TALK => '0.1',
+ NS_CATEGORY => '0.5',
+ NS_CATEGORY_TALK => '0.1',
+ );
+
+ /**
+ * A one-dimensional array of namespaces in the wiki
+ *
+ * @var array
+ */
+ var $namespaces = array();
+
+ /**
+ * When this sitemap batch was generated
+ *
+ * @var string
+ */
+ var $timestamp;
+
+ /**
+ * A database slave object
+ *
+ * @var object
+ */
+ var $dbr;
+
+ /**
+ * A resource pointing to the sitemap index file
+ *
+ * @var resource
+ */
+ var $findex;
+
+
+ /**
+ * A resource pointing to a sitemap file
+ *
+ * @var resource
+ */
+ var $file;
+
+ /**
+ * A resource pointing to php://stderr
+ *
+ * @var resource
+ */
+ var $stderr;
+
+ /**
+ * Constructor
+ *
+ * @param string $fspath The path to prepend to the filenames, used to
+	 * save them somewhere other than the root directory
+ * @param string $path The path to append to the domain name
+ * @param bool $compress Whether to compress the sitemap files
+ */
+ function GenerateSitemap( $fspath, $path, $compress ) {
+ global $wgDBname, $wgScriptPath;
+
+ $this->url_limit = 50000;
+ $this->size_limit = pow( 2, 20 ) * 10;
+ $this->fspath = isset( $fspath ) ? $fspath : '';
+ $this->path = isset( $path ) ? $path : $wgScriptPath;
+ $this->compress = $compress;
+
+ $this->stderr = fopen( 'php://stderr', 'wt' );
+ $this->dbr =& wfGetDB( DB_SLAVE );
+ $this->generateNamespaces();
+ $this->timestamp = wfTimestamp( TS_ISO_8601, wfTimestampNow() );
+ $this->findex = fopen( "{$this->fspath}sitemap-index-$wgDBname.xml", 'wb' );
+ }
+
+ /**
+ * Generate a one-dimensional array of existing namespaces
+ */
+ function generateNamespaces() {
+ $fname = 'GenerateSitemap::generateNamespaces';
+
+ $res = $this->dbr->select( 'page',
+ array( 'page_namespace' ),
+ array(),
+ $fname,
+ array(
+ 'GROUP BY' => 'page_namespace',
+ 'ORDER BY' => 'page_namespace',
+ )
+ );
+
+ while ( $row = $this->dbr->fetchObject( $res ) )
+ $this->namespaces[] = $row->page_namespace;
+ }
+
+ /**
+ * Get the priority of a given namespace
+ *
+ * @param int $namespace The namespace to get the priority for
+	 *
+	 * @return string
+	 */
+ function priority( $namespace ) {
+ return isset( $this->priorities[$namespace] ) ? $this->priorities[$namespace] : $this->guessPriority( $namespace );
+ }
+
+ /**
+	 * If the namespace isn't listed on the priority list, return the
+	 * default priority for the namespace, which varies depending on
+	 * whether it's a talk page or not.
+ *
+ * @param int $namespace The namespace to get the priority for
+ *
+ * @return string
+ */
+ function guessPriority( $namespace ) {
+ return Namespace::isMain( $namespace ) ? $this->priorities[GS_MAIN] : $this->priorities[GS_TALK];
+ }
+
+ /**
+	 * Return a database result with all the pages in a given namespace
+ *
+ * @param int $namespace Limit the query to this namespace
+ *
+ * @return resource
+ */
+ function getPageRes( $namespace ) {
+ $fname = 'GenerateSitemap::getPageRes';
+
+ return $this->dbr->select( 'page',
+ array(
+ 'page_namespace',
+ 'page_title',
+ 'page_touched',
+ ),
+ array( 'page_namespace' => $namespace ),
+ $fname
+ );
+ }
+
+ /**
+ * Main loop
+ *
+ * @access public
+ */
+ function main() {
+ global $wgDBname, $wgContLang;
+
+ fwrite( $this->findex, $this->openIndex() );
+
+ foreach ( $this->namespaces as $namespace ) {
+ $res = $this->getPageRes( $namespace );
+ $this->file = false;
+ $this->generateLimit( $namespace );
+ $length = $this->limit[0];
+ $i = $smcount = 0;
+
+ $fns = $wgContLang->getFormattedNsText( $namespace );
+ $this->debug( "$namespace ($fns)" );
+ while ( $row = $this->dbr->fetchObject( $res ) ) {
+ if ( $i++ === 0 || $i === $this->url_limit + 1 || $length + $this->limit[1] + $this->limit[2] > $this->size_limit ) {
+ if ( $this->file !== false ) {
+ $this->write( $this->file, $this->closeFile() );
+ $this->close( $this->file );
+ }
+ $filename = $this->sitemapFilename( $namespace, $smcount++ );
+ $this->file = $this->open( $this->fspath . $filename, 'wb' );
+ $this->write( $this->file, $this->openFile() );
+ fwrite( $this->findex, $this->indexEntry( $filename ) );
+ $this->debug( "\t$filename" );
+ $length = $this->limit[0];
+ $i = 1;
+ }
+ $title = Title::makeTitle( $row->page_namespace, $row->page_title );
+ $date = wfTimestamp( TS_ISO_8601, $row->page_touched );
+ $entry = $this->fileEntry( $title->getFullURL(), $date, $this->priority( $namespace ) );
+ $length += strlen( $entry );
+ $this->write( $this->file, $entry );
+ }
+ if ( $this->file ) {
+ $this->write( $this->file, $this->closeFile() );
+ $this->close( $this->file );
+ }
+ }
+ fwrite( $this->findex, $this->closeIndex() );
+ fclose( $this->findex );
+ }
+
+ /**
+ * gzopen() / fopen() wrapper
+ *
+ * @return resource
+ */
+ function open( $file, $flags ) {
+ return $this->compress ? gzopen( $file, $flags ) : fopen( $file, $flags );
+ }
+
+ /**
+ * gzwrite() / fwrite() wrapper
+ */
+ function write( &$handle, $str ) {
+ if ( $this->compress )
+ gzwrite( $handle, $str );
+ else
+ fwrite( $handle, $str );
+ }
+
+ /**
+ * gzclose() / fclose() wrapper
+ */
+ function close( &$handle ) {
+ if ( $this->compress )
+ gzclose( $handle );
+ else
+ fclose( $handle );
+ }
+
+ /**
+ * Get a sitemap filename
+ *
+ * @static
+ *
+ * @param int $namespace The namespace
+ * @param int $count The count
+ *
+ * @return string
+ */
+ function sitemapFilename( $namespace, $count ) {
+ global $wgDBname;
+
+ $ext = $this->compress ? '.gz' : '';
+
+ return "sitemap-$wgDBname-NS_$namespace-$count.xml$ext";
+ }
+
+ /**
+ * Return the XML required to open an XML file
+ *
+ * @static
+ *
+ * @return string
+ */
+ function xmlHead() {
+ return '<?xml version="1.0" encoding="UTF-8"?>' . "\n";
+ }
+
+ /**
+ * Return the XML schema being used
+ *
+ * @static
+ *
+	 * @return string
+ */
+ function xmlSchema() {
+ return 'http://www.google.com/schemas/sitemap/0.84';
+ }
+
+ /**
+ * Return the XML required to open a sitemap index file
+ *
+ * @return string
+ */
+ function openIndex() {
+ return $this->xmlHead() . '<sitemapindex xmlns="' . $this->xmlSchema() . '">' . "\n";
+ }
+
+ /**
+	 * Return the XML for a single sitemap index file entry
+ *
+ * @static
+ *
+ * @param string $filename The filename of the sitemap file
+ *
+ * @return string
+ */
+ function indexEntry( $filename ) {
+ return
+ "\t<sitemap>\n" .
+ "\t\t<loc>$filename</loc>\n" .
+ "\t\t<lastmod>{$this->timestamp}</lastmod>\n" .
+ "\t</sitemap>\n";
+ }
+
+ /**
+ * Return the XML required to close a sitemap index file
+ *
+ * @static
+ *
+ * @return string
+ */
+ function closeIndex() {
+ return "</sitemapindex>\n";
+ }
+
+ /**
+ * Return the XML required to open a sitemap file
+ *
+ * @return string
+ */
+ function openFile() {
+ return $this->xmlHead() . '<urlset xmlns="' . $this->xmlSchema() . '">' . "\n";
+ }
+
+ /**
+ * Return the XML for a single sitemap entry
+ *
+ * @static
+ *
+	 * @param string $url An RFC 2396 compliant URL
+	 * @param string $date An ISO 8601 date
+ * @param string $priority A priority indicator, 0.0 - 1.0 inclusive with a 0.1 stepsize
+ *
+ * @return string
+ */
+ function fileEntry( $url, $date, $priority ) {
+ return
+ "\t<url>\n" .
+ "\t\t<loc>$url</loc>\n" .
+ "\t\t<lastmod>$date</lastmod>\n" .
+ "\t\t<priority>$priority</priority>\n" .
+ "\t</url>\n";
+ }
+
+ /**
+	 * Return the XML required to close a sitemap file
+ *
+ * @static
+ * @return string
+ */
+ function closeFile() {
+ return "</urlset>\n";
+ }
+
+ /**
+ * Write a string to stderr followed by a UNIX newline
+ */
+ function debug( $str ) {
+ fwrite( $this->stderr, "$str\n" );
+ }
+
+ /**
+ * Populate $this->limit
+ */
+ function generateLimit( $namespace ) {
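+		// Build a worst-case title: 63 four-byte UTF-8 characters plus one
+		// three-byte character, i.e. a 255-byte DB key, to size the largest
+		// possible <url> entry.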
+ $title = Title::makeTitle( $namespace, str_repeat( "\xf0\xa8\xae\x81", 63 ) . "\xe5\x96\x83" );
+
+ $this->limit = array(
+ strlen( $this->openFile() ),
+ strlen( $this->fileEntry( $title->getFullUrl(), wfTimestamp( TS_ISO_8601, wfTimestamp() ), $this->priority( $namespace ) ) ),
+ strlen( $this->closeFile() )
+ );
+ }
+}
+
+if ( in_array( '--help', $argv ) ) {
+ echo
+ "Usage: php generateSitemap.php [host] [options]\n" .
+ "\thost = hostname\n" .
+ "\toptions:\n" .
+ "\t\t--help\tshow this message\n" .
+	"\t\t--fspath\tThe file system path to save to, e.g. /tmp/sitemap/\n" .
+ "\t\t--path\tThe http path to use, e.g. /wiki\n" .
+ "\t\t--compress=[yes|no]\tcompress the sitemap files, default yes\n";
+ die( -1 );
+}
+
+if ( isset( $argv[1] ) && strpos( $argv[1], '--' ) !== 0 )
+ $_SERVER['SERVER_NAME'] = $argv[1];
+
+$optionsWithArgs = array( 'fspath', 'path', 'compress' );
+require_once 'commandLine.inc';
+
+$gs = new GenerateSitemap( @$options['fspath'], @$options['path'], @$options['compress'] !== 'no' );
+$gs->main();
+?>
diff --git a/maintenance/importDump.php b/maintenance/importDump.php
new file mode 100644
index 00000000..1bca3296
--- /dev/null
+++ b/maintenance/importDump.php
@@ -0,0 +1,141 @@
+<?php
+/**
+ * Copyright (C) 2005 Brion Vibber <brion@pobox.com>
+ * http://www.mediawiki.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ * http://www.gnu.org/copyleft/gpl.html
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+$optionsWithArgs = array( 'report' );
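+// Usage, as inferred from the option handling below (not authoritative):
+//   php importDump.php [--quiet] [--report=n] [--dry-run] [<dumpfile.xml[.gz]>]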
+
+require_once( 'commandLine.inc' );
+require_once( 'SpecialImport.php' );
+
+class BackupReader {
+ var $reportingInterval = 100;
+ var $reporting = true;
+ var $pageCount = 0;
+ var $revCount = 0;
+ var $dryRun = false;
+
+ function BackupReader() {
+ $this->stderr = fopen( "php://stderr", "wt" );
+ }
+
+ function reportPage( $page ) {
+ $this->pageCount++;
+ }
+
+ function handleRevision( $rev ) {
+ $title = $rev->getTitle();
+ if (!$title) {
+ $this->progress( "Got bogus revision with null title!" );
+ return;
+ }
+ $display = $title->getPrefixedText();
+ $timestamp = $rev->getTimestamp();
+ #echo "$display $timestamp\n";
+
+ $this->revCount++;
+ $this->report();
+
+ if( !$this->dryRun ) {
+ call_user_func( $this->importCallback, $rev );
+ }
+ }
+
+ function report( $final = false ) {
+ if( $final xor ( $this->pageCount % $this->reportingInterval == 0 ) ) {
+ $this->showReport();
+ }
+ }
+
+ function showReport() {
+ if( $this->reporting ) {
+ $delta = wfTime() - $this->startTime;
+ if( $delta ) {
+ $rate = $this->pageCount / $delta;
+ $revrate = $this->revCount / $delta;
+ } else {
+ $rate = '-';
+ $revrate = '-';
+ }
+ $this->progress( "$this->pageCount ($rate pages/sec $revrate revs/sec)" );
+ }
+ }
+
+ function progress( $string ) {
+ fwrite( $this->stderr, $string . "\n" );
+ }
+
+ function importFromFile( $filename ) {
+ if( preg_match( '/\.gz$/', $filename ) ) {
+ $filename = 'compress.zlib://' . $filename;
+ }
+ $file = fopen( $filename, 'rt' );
+ return $this->importFromHandle( $file );
+ }
+
+ function importFromStdin() {
+ $file = fopen( 'php://stdin', 'rt' );
+ return $this->importFromHandle( $file );
+ }
+
+ function importFromHandle( $handle ) {
+ $this->startTime = wfTime();
+
+ $source = new ImportStreamSource( $handle );
+ $importer = new WikiImporter( $source );
+
+ $importer->setPageCallback( array( &$this, 'reportPage' ) );
+ $this->importCallback = $importer->setRevisionCallback(
+ array( &$this, 'handleRevision' ) );
+
+ return $importer->doImport();
+ }
+}
+
+if( wfReadOnly() ) {
+ wfDie( "Wiki is in read-only mode; you'll need to disable it for import to work.\n" );
+}
+
+$reader = new BackupReader();
+if( isset( $options['quiet'] ) ) {
+ $reader->reporting = false;
+}
+if( isset( $options['report'] ) ) {
+ $reader->reportingInterval = intval( $options['report'] );
+}
+if( isset( $options['dry-run'] ) ) {
+ $reader->dryRun = true;
+}
+
+if( isset( $args[0] ) ) {
+ $result = $reader->importFromFile( $args[0] );
+} else {
+ $result = $reader->importFromStdin();
+}
+
+if( WikiError::isError( $result ) ) {
+ echo $result->getMessage() . "\n";
+} else {
+ echo "Done!\n";
+}
+
+?>
diff --git a/maintenance/importImages.inc.php b/maintenance/importImages.inc.php
new file mode 100644
index 00000000..bf48c0c7
--- /dev/null
+++ b/maintenance/importImages.inc.php
@@ -0,0 +1,67 @@
+<?php
+
+/**
+ * Support functions for the importImages script
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ */
+
+/**
+ * Search a directory for files with one of a set of extensions
+ *
+ * @param $dir Path to directory to search
+ * @param $exts Array of extensions to search for
+ * @return mixed Array of filenames on success, or false on failure
+ */
+function findFiles( $dir, $exts ) {
+ if( is_dir( $dir ) ) {
+		if( $dhl = opendir( $dir ) ) {
+			$files = array();
+ while( ( $file = readdir( $dhl ) ) !== false ) {
+ if( is_file( $dir . '/' . $file ) ) {
+ list( $name, $ext ) = splitFilename( $dir . '/' . $file );
+ if( array_search( strtolower( $ext ), $exts ) !== false )
+ $files[] = $dir . '/' . $file;
+ }
+ }
+ return $files;
+ } else {
+ return false;
+ }
+ } else {
+ return false;
+ }
+}
+
+/**
+ * Split a filename into filename and extension
+ *
+ * @param $filename Filename
+ * @return array
+ */
+function splitFilename( $filename ) {
+ $parts = explode( '.', $filename );
+ $ext = $parts[ count( $parts ) - 1 ];
+ unset( $parts[ count( $parts ) - 1 ] );
+ $fname = implode( '.', $parts );
+ return array( $fname, $ext );
+}
+
+/**
+ * Given an image hash, check that the structure exists to save the image file
+ * and create it if it doesn't
+ *
+ * @param $hash Part of an image hash, e.g. /f/fd/
+ */
+function makeHashPath( $hash ) {
+ global $wgUploadDirectory;
+ $parts = explode( '/', substr( $hash, 1, strlen( $hash ) - 2 ) );
+ if( !is_dir( $wgUploadDirectory . '/' . $parts[0] ) )
+ mkdir( $wgUploadDirectory . '/' . $parts[0] );
+ if( !is_dir( $wgUploadDirectory . '/' . $hash ) )
+ mkdir( $wgUploadDirectory . '/' . $hash );
+}
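+
+// Illustrative call (a sketch): makeHashPath( '/f/fd/' ) ensures that both
+// $wgUploadDirectory/f and $wgUploadDirectory/f/fd exist.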
+
+
+?> \ No newline at end of file
diff --git a/maintenance/importImages.php b/maintenance/importImages.php
new file mode 100644
index 00000000..925c64b7
--- /dev/null
+++ b/maintenance/importImages.php
@@ -0,0 +1,101 @@
+<?php
+
+/**
+ * Maintenance script to import one or more images from the local file system into
+ * the wiki without using the web-based interface
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ */
+
+require_once( 'commandLine.inc' );
+require_once( 'importImages.inc.php' );
+echo( "Import Images\n\n" );
+
+# Need a directory and at least one extension
+if( count( $args ) > 1 ) {
+
+ $dir = array_shift( $args );
+
+	# Check the allowed extensions
+	$exts = array();
+	while( $ext = array_shift( $args ) )
+		$exts[] = ltrim( $ext, '.' );
+
+ # Search the directory given and pull out suitable candidates
+ $files = findFiles( $dir, $exts );
+
+ # Set up a fake user for this operation
+ $wgUser = User::newFromName( 'Image import script' );
+ $wgUser->setLoaded( true );
+
+ # Batch "upload" operation
+ foreach( $files as $file ) {
+
+ $base = basename( $file );
+
+ # Validate a title
+ $title = Title::makeTitleSafe( NS_IMAGE, $base );
+ if( is_object( $title ) ) {
+
+ # Check existence
+ $image = new Image( $title );
+ if( !$image->exists() ) {
+
+ global $wgUploadDirectory;
+
+			# copy() doesn't create paths, so if the hash path doesn't
+			# exist, we have to create it
+ makeHashPath( wfGetHashPath( $image->name ) );
+
+ # Stash the file
+ echo( "Saving {$base}..." );
+
+ if( copy( $file, $image->getFullPath() ) ) {
+
+ echo( "importing..." );
+
+ # Grab the metadata
+ $image->loadFromFile();
+
+ # Record the upload
+ if( $image->recordUpload( '', 'Importing image file' ) ) {
+
+ # We're done!
+ echo( "done.\n" );
+
+ } else {
+ echo( "failed.\n" );
+ }
+
+ } else {
+ echo( "failed.\n" );
+ }
+
+ } else {
+ echo( "{$base} could not be imported; a file with this name exists in the wiki\n" );
+ }
+
+ } else {
+ echo( "{$base} could not be imported; a valid title cannot be produced\n" );
+ }
+
+ }
+
+
+} else {
+ showUsage();
+}
+
+exit();
+
+function showUsage( $reason = false ) {
+ if( $reason )
+ echo( $reason . "\n" );
+ echo( "USAGE: php importImages.php <dir> <ext1> <ext2>\n\n" );
+ echo( "<dir> : Path to the directory containing images to be imported\n" );
+ echo( "<ext1+> File extensions to import\n\n" );
+ exit();
+}
+
+?> \ No newline at end of file
diff --git a/maintenance/importLogs.inc b/maintenance/importLogs.inc
new file mode 100644
index 00000000..154657c8
--- /dev/null
+++ b/maintenance/importLogs.inc
@@ -0,0 +1,144 @@
+<?php
+# Copyright (C) 2004 Brion Vibber <brion@pobox.com>
+# http://www.mediawiki.org/
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# http://www.gnu.org/copyleft/gpl.html
+
+/**
+ * Attempt to import existing log pages into the log tables.
+ *
+ * Not yet complete.
+ *
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+require_once( 'GlobalFunctions.php' );
+require_once( 'Database.php' );
+require_once( 'Article.php' );
+require_once( 'LogPage.php' );
+
+/**
+ * Log importer
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+class LogImporter {
+ var $dummy = false;
+
+ function LogImporter( $type ) {
+ $this->type = $type;
+ $this->db =& wfGetDB( DB_MASTER );
+ $this->actions = $this->setupActions();
+ }
+
+ function setupActions() {
+ $actions = array();
+ foreach( LogPage::validActions( $this->type ) as $action ) {
+ $key = "{$this->type}/$action";
+ $actions[$key] = $this->makeLineRegexp( $this->type, $action );
+ }
+ return $actions;
+ }
+
+ function makeLineRegexp( $type, $action ) {
+ $linkRegexp = '(?:\[\[)?([^|\]]+?)(?:\|[^\]]+?)?(?:\]\])?';
+ $linkRegexp2 = '\[\[([^|\]]+?)(?:\|[^\]]+?)?\]\]';
+
+ $text = LogPage::actionText( $type, $action );
+ $text = preg_quote( $text, '/' );
+ $text = str_replace( '\$1', $linkRegexp, $text );
+ $text = '^(.*?) ' . $linkRegexp2 . ' ' . $text;
+ $text .= '(?: <em>\((.*)\)<\/em>)?';
+ $text = "/$text/";
+ return $text;
+ }
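+	// An illustrative line this regexp is built to match (hypothetical content):
+	//   01:55, 23 Aug 2004 [[User:Example|Example]] deleted [[Some page]] <em>(reason)</em>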
+
+ function importText( $text ) {
+ if( $this->dummy ) {
+ print $text;
+ var_dump( $this->actions );
+ }
+ $lines = explode( '<li>', $text );
+ foreach( $lines as $line ) {
+ if( preg_match( '!^(.*)</li>!', $line, $matches ) ) {
+ $this->importLine( $matches[1] );
+ }
+ }
+ }
+
+ function fixDate( $date ) {
+ # Yuck! Parsing multilingual date formats??!!!!???!!??!
+	# 01:55, 23 Aug 2004 - won't parse with strtotime()
+ # "Aug 23 2004 01:55" - seems ok
+ # TODO: multilingual attempt to extract from the data in Language
+ if( preg_match( '/^(\d+:\d+(?::\d+)?), (.*)$/', $date, $matches ) ) {
+ $date = $matches[2] . ' ' . $matches[1];
+ }
+ $n = strtotime( $date ) + date("Z");
+ # print gmdate( 'D, d M Y H:i:s T', $n ) . "\n";
+ $timestamp = wfTimestamp( TS_MW, $n );
+ return $timestamp;
+ }
+
+ function importLine( $line ) {
+ foreach( $this->actions as $action => $regexp ) {
+ if( preg_match( $regexp, $line, $matches ) ) {
+ if( $this->dummy ) {
+ #var_dump( $matches );
+ }
+ $date = $this->fixDate( $matches[1] );
+ $user = Title::newFromText( $matches[2] );
+ $target = Title::newFromText( $matches[3] );
+ if( isset( $matches[4] ) ) {
+ $comment = $matches[4];
+ } else {
+ $comment = '';
+ }
+
+ $insert = array(
+ 'log_type' => $this->type,
+ 'log_action' => preg_replace( '!^.*/!', '', $action ),
+ 'log_timestamp' => $date,
+ 'log_user' => intval( User::idFromName( $user->getText() ) ),
+ 'log_namespace' => $target->getNamespace(),
+ 'log_title' => $target->getDBkey(),
+ 'log_comment' => wfUnescapeWikiText( $comment ),
+ );
+ if( $this->dummy ) {
+ var_dump( $insert );
+ } else {
+ # FIXME: avoid duplicates!
+ $this->db->insert( 'logging', $insert );
+ }
+ break;
+ }
+ }
+ }
+}
+
+function wfUnescapeWikiText( $text ) {
+ $text = str_replace(
+ array( '&#91;', '&#124;', '&#39;', 'ISBN&#32;', '&#58;//' , "\n&#61;", '&#123;&#123;' ),
+ array( '[', '|', "'", 'ISBN ' , '://' , "\n=", '{{' ),
+ $text );
+ return $text;
+}
+
+?>
diff --git a/maintenance/importLogs.php b/maintenance/importLogs.php
new file mode 100644
index 00000000..6187c2e6
--- /dev/null
+++ b/maintenance/importLogs.php
@@ -0,0 +1,27 @@
+<?php
+/**
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+require_once( "commandLine.inc" );
+require_once( "importLogs.inc" );
+
+#print $text;
+#exit();
+
+foreach( LogPage::validTypes() as $type ) {
+ if( $type == '' ) continue;
+
+ $page = LogPage::logName( $type );
+ $log = new Article( Title::makeTitleSafe( NS_PROJECT, $page ) );
+ $text = $log->fetchContent();
+
+ $importer = new LogImporter( $type );
+ $importer->dummy = true;
+ $importer->importText( $text );
+}
+
+?>
diff --git a/maintenance/importPhase2.php b/maintenance/importPhase2.php
new file mode 100644
index 00000000..a73657b5
--- /dev/null
+++ b/maintenance/importPhase2.php
@@ -0,0 +1,370 @@
+<?php
+# MediaWiki 'phase 2' to current format import script
+# (import format current as of 1.2.0, March 2004)
+#
+# Copyright (C) 2004 Brion Vibber <brion@pobox.com>
+# Portions by Lee Daniel Crocker, 2002
+# http://www.mediawiki.org/
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# http://www.gnu.org/copyleft/gpl.html
+
+/**
+ * @todo document
+ * @deprecated
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+die( "This import script is not currently maintained.
+If you need it you'll have to modify it as necessary.\n");
+
+if ( ! is_readable( "../LocalSettings.php" ) ) {
+ print "A copy of your installation's LocalSettings.php\n" .
+ "must exist in the source directory.\n";
+ exit();
+}
+
+$wgCommandLineMode = true;
+ini_set("implicit_flush", 1);
+
+$DP = "../includes";
+require_once( "../LocalSettings.php" );
+require_once( "../AdminSettings.php" );
+
+$wgDBuser = $wgDBadminuser;
+$wgDBpassword = $wgDBadminpassword;
+
+$sep = ( DIRECTORY_SEPARATOR == "\\" ) ? ";" : ":";
+ini_set( "include_path", "$IP$sep$include_path" );
+
+require_once( "Setup.php" );
+
+require_once( "../install-utils.inc" );
+require_once( "InitialiseMessages.inc" );
+require_once( "rebuildlinks.inc" );
+require_once( "rebuildrecentchanges.inc" );
+require_once( "rebuildtextindex.inc" );
+
+/**
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+class Phase2Importer {
+ var $olddb, $titleCache;
+
+ function Phase2Importer( $database ) {
+ $this->olddb = $database;
+ $this->titleCache = new TitleCache;
+ }
+
+ function importAll() {
+ $this->importCurData();
+ $this->fixCurTitles();
+
+ $this->importOldData();
+ $this->fixOldTitles();
+
+ $this->importUserData();
+ $this->fixUserOptions();
+
+ $this->importWatchlists();
+
+ $this->importLinkData();
+
+ /*
+ # For some reason this is broken. RecentChanges will just start anew...
+ rebuildRecentChangesTablePass1();
+ rebuildRecentChangesTablePass2();
+ */
+
+ print "Rebuilding search index:\n";
+ dropTextIndex();
+ rebuildTextIndex();
+ createTextIndex();
+
+ initialiseMessages();
+ }
+
+ # Simple import functions; for the most part these are pretty straightforward.
+ # MySQL copies everything over to the new database and tweaks a few things.
+ function importCurData() {
+ print "Clearing pages from default install, if any...\n";
+ wfQuery( "DELETE FROM cur", DB_MASTER );
+
+ print "Importing current revision data...\n";
+ wfQuery( "INSERT INTO cur (cur_id,cur_namespace,cur_title,cur_text,cur_comment,
+ cur_user,cur_user_text,cur_timestamp,cur_restrictions,cur_counter,
+ cur_is_redirect,cur_minor_edit,cur_is_new,cur_random,cur_touched)
+ SELECT cur_id,0,cur_title,cur_text,cur_comment,
+ cur_user,cur_user_text,cur_timestamp,REPLACE(cur_restrictions,'is_',''),cur_counter,
+		cur_text like '#redirect%',cur_minor_edit,0,RAND(),NOW()+0
+ FROM {$this->olddb}.cur", DB_MASTER );
+ $n = mysql_affected_rows();
+ print "$n rows imported.\n";
+ }
+
+ function importOldData() {
+ print "Clearing old revision data from default install, if any...\n";
+ wfQuery( "DELETE FROM old", DB_MASTER );
+
+ print "Importing old revision data...\n";
+ wfQuery( "INSERT INTO old (old_id,old_namespace,old_title,old_text,old_comment,
+ old_user,old_user_text,old_timestamp,old_minor_edit,old_flags)
+ SELECT old_id,0,old_title,old_text,old_comment,
+ old_user,old_user_text,old_timestamp,old_minor_edit,''
+ FROM {$this->olddb}.old", DB_MASTER );
+ $n = mysql_affected_rows();
+ print "$n rows imported.\n";
+ }
+
+ function importUserData() {
+ print "Clearing users from default install, if any...\n";
+ wfQuery( "DELETE FROM user", DB_MASTER );
+
+ print "Importing user data...\n";
+		wfQuery( "INSERT INTO user (user_id,user_name,user_rights,
+ user_password,user_newpassword,user_email,user_options,user_touched)
+ SELECT user_id,user_name,REPLACE(user_rights,'is_',''),
+ MD5(CONCAT(user_id,'-',MD5(user_password))),'',user_email,user_options,NOW()+0
+ FROM {$this->olddb}.user", DB_MASTER );
+ $n = mysql_affected_rows();
+ print "$n rows imported.\n";
+ }
+
+ # A little less clean...
+ function importWatchlists() {
+ print "Clearing watchlists from default install, if any...\n";
+ wfQuery( "DELETE FROM watchlist", DB_MASTER );
+
+ print "Importing watchlists...";
+ $res = wfQuery( "SELECT user_id,user_watch FROM {$this->olddb}.user WHERE user_watch != ''", DB_MASTER );
+ $total = wfNumRows( $res );
+ $n = 0;
+ print " ($total total)\n";
+
+ while( $row = wfFetchObject( $res ) ) {
+ $id = intval( $row->user_id );
+ $list = explode( "\n", $row->user_watch );
+ foreach( $list as $page ) {
+ $title = $this->titleCache->fetch( $page );
+ if( is_null( $title ) ) {
+					print "Caught bad title '{$page}'\n";
+ } else {
+ $ns = $title->getNamespace();
+ $t = wfStrencode( $title->getDBkey() );
+ wfQuery( "INSERT INTO watchlist(wl_user,wl_namespace,wl_title) VALUES ($id,$ns,'$t')", DB_MASTER );
+ }
+ }
+ if( ++$n % 50 == 0 ) {
+ print "$n\n";
+ }
+ }
+ wfFreeResult( $res );
+ }
+
+ function importLinkData() {
+		# MUST BE CALLED BEFORE fixCurTitles()!
+ print "Clearing links from default install, if any...\n";
+ wfQuery( "DELETE FROM links", DB_MASTER );
+ wfQuery( "DELETE FROM brokenlinks", DB_MASTER );
+
+ print "Importing live links...";
+ wfQuery( "INSERT INTO links (l_from, l_to)
+ SELECT DISTINCT linked_from,cur_id
+ FROM {$this->olddb}.linked,{$this->olddb}.cur
+ WHERE linked_to=cur_title", DB_MASTER );
+ $n = mysql_affected_rows();
+ print "$n rows imported.\n";
+
+ print "Importing broken links...";
+ wfQuery( "INSERT INTO brokenlinks (bl_from, bl_to)
+ SELECT DISTINCT cur_id,unlinked_to
+ FROM {$this->olddb}.unlinked,{$this->olddb}.cur
+ WHERE unlinked_from=cur_title", DB_MASTER );
+ $n = mysql_affected_rows();
+ print "$n rows imported.\n";
+ }
+
+ # Fixup functions: munge data that's already been brought into tables
+ function fixCurTitles() {
+ $this->fixTitles( "cur" );
+ }
+
+ function fixOldTitles() {
+ $this->fixTitles( "old" );
+ }
+
+ function fixTitles( $table ) {
+ print "Fixing titles in $table...";
+ $res = wfQuery( "SELECT DISTINCT {$table}_title AS title FROM $table", DB_MASTER );
+ $total = wfNumRows( $res );
+ $n = 0;
+ print " ($total total)\n";
+
+ while( $row = wfFetchObject( $res ) ) {
+ $xt = wfStrencode( $row->title );
+ $title = $this->titleCache->fetch( $row->title );
+ if( is_null( $title ) ) {
+ print "Caught bad title '{$row->title}'\n";
+ } else {
+ $ns = $title->getNamespace();
+ $t = wfStrencode( $title->getDBkey() );
+ wfQuery( "UPDATE $table SET {$table}_namespace=$ns,{$table}_title='$t'
+ WHERE {$table}_namespace=0 AND {$table}_title='$xt'", DB_MASTER );
+ }
+ if( ++$n % 50 == 0 ) {
+ print "$n\n";
+ }
+ }
+ wfFreeResult( $res );
+ }
+
+ function rewriteUserOptions( $in )
+ {
+ $s = urldecode( $in );
+ $a = explode( "\n", $s );
+
+ foreach ( $a as $l ) {
+ if ( preg_match( "/^([A-Za-z0-9_]+)=(.*)/", $l, $m ) ) {
+ $ops[$m[1]] = $m[2];
+ }
+ }
+ $nops = array();
+
+ $q = strtolower( $ops["quickBar"] );
+ if ( $q == "none" ) { $q = 0; }
+ else { $q = 1; } # Default to left
+ $nops["quickbar"] = $q;
+
+ if ( $ops["markupNewTopics"] == "inverse" ) {
+ $nops["highlightbroken"] = 1;
+ }
+ $sk = substr( strtolower( $ops["skin"] ), 0, 4 );
+ if ( "star" == $sk ) { $sk = 0; }
+ else if ( "nost" == $sk ) { $sk = 1; }
+ else if ( "colo" == $sk ) { $sk = 2; }
+ else { $sk = 0; }
+ $nops["skin"] = $sk;
+
+ $u = strtolower( $ops["underlineLinks"] );
+ if ( "yes" == $u || "on" == $u ) { $nops["underline"] = 1; }
+ else { $nops["underline"] = 0; }
+
+ $t = ( (int) ($ops["hourDiff"]) );
+ if ( $t < -23 || $t > 23 ) { $t = 0; }
+ if ( 0 != $t ) { $nops["timecorrection"] = $t; }
+
+ $j = strtolower( $ops["justify"] );
+ if ( "yes" == $j || "on" == $j ) { $nops["justify"] = 1; }
+ $n = strtolower( $ops["numberHeadings"] );
+ if ( "yes" == $n || "on" == $n ) { $nops["numberheadings"] = 1; }
+ $h = strtolower( $ops["hideMinor"] );
+ if ( "yes" == $h || "on" == $h ) { $nops["hideminor"] = 1; }
+ $r = strtolower( $ops["rememberPassword"] );
+ if ( "yes" == $r || "on" == $r ) { $nops["rememberpassword"] = 1; }
+ $s = strtolower( $ops["showHover"] );
+ if ( "yes" == $s || "on" == $s ) { $nops["hover"] = 1; }
+
+ $c = $ops["cols"];
+ if ( $c < 20 || $c > 200 ) { $nops["cols"] = 80; }
+ else { $nops["cols"] = $c; }
+ $r = $ops["rows"];
+ if ( $r < 5 || $r > 100 ) { $nops["rows"] = 20; }
+ else { $nops["rows"] = $r; }
+ $r = $ops["resultsPerPage"];
+ if ( $r < 3 || $r > 500 ) { $nops["searchlimit"] = 20; }
+ else { $nops["searchlimit"] = $r; }
+ $r = $ops["viewRecentChanges"];
+ if ( $r < 10 || $r > 1000 ) { $nops["rclimit"] = 50; }
+ else { $nops["rclimit"] = $r; }
+ $nops["rcdays"] = 3;
+
+ $a = array();
+ foreach ( $nops as $oname => $oval ) {
+ array_push( $a, "$oname=$oval" );
+ }
+ $s = implode( "\n", $a );
+ return $s;
+ }
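+
+	# A worked example (hypothetical input). The old 'phase 2' format is a
+	# urlencoded, newline-separated "name=value" list, so
+	#   rewriteUserOptions( urlencode( "quickBar=none\nskin=Nostalgia\ncols=120" ) )
+	# returns:
+	#   "quickbar=0\nskin=1\nunderline=0\ncols=120\nrows=20\nsearchlimit=20\nrclimit=50\nrcdays=3"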
+
+ function fixUserOptions() {
+ print "Fixing user options...";
+ $res = wfQuery( "SELECT user_id,user_options FROM user", DB_MASTER );
+ $total = wfNumRows( $res );
+ $n = 0;
+ print " ($total total)\n";
+
+ while( $row = wfFetchObject( $res ) ) {
+ $id = intval( $row->user_id );
+ $option = wfStrencode( $this->rewriteUserOptions( $row->user_options ) );
+ wfQuery( "UPDATE user SET user_options='$option' WHERE user_id=$id LIMIT 1", DB_MASTER );
+ if( ++$n % 50 == 0 ) {
+ print "$n\n";
+ }
+ }
+ wfFreeResult( $res );
+ }
+
+}
+
+/**
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+class TitleCache {
+ var $hash = array();
+
+ function &fetch( $dbkey ) {
+		if( !isset( $this->hash[$dbkey] ) ) {
+			# Cache on the object; a bare $hash here would be function-local
+			# and defeat the cache entirely
+			$this->hash[$dbkey] = Title::newFromDBkey( $dbkey );
+		}
+		return $this->hash[$dbkey];
+ }
+
+}
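+
+# Usage sketch: the cache memoizes Title objects by their DB key, e.g.
+#   $titleCache = new TitleCache;
+#   $title =& $titleCache->fetch( 'Main_Page' );
+# repeated fetches of the same key return the cached object.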
+
+# Main program: confirm interactively, then run the import.
+print "You should have already run the installer to create a fresh, blank database.\n";
+print "Data will be inserted into '$wgDBname'. THIS SHOULD BE EMPTY AND ANY DATA IN IN WILL BE ERASED!\n";
+print "\nIf that's not what you want, ABORT NOW!\n\n";
+
+print "Please enter the name of the old 'phase 2'-format database that will be used as a source:\n";
+print "Old database name [enciclopedia]: ";
+$olddb = readconsole();
+if( empty( $olddb ) ) $olddb = "enciclopedia";
+
+if( $olddb == $wgDBname ) {
+ die( "Can't upgrade in-place! You must create a new database and copy data into it.\n" );
+}
+
+print "\nSource database: '$olddb'\n";
+print " Dest database: '$wgDBname'\n";
+print "Is this correct? Anything in '$wgDBname' WILL BE DESTROYED. [y/N] ";
+$response = readconsole();
+if( strtolower( $response{0} ) != 'y' ) {
+ die( "\nAborted by user.\n" );
+}
+
+print "Starting import....\n";
+
+$wgTitle = Title::newFromText( "Conversion script" );
+$importer = new Phase2Importer( $olddb );
+$importer->importAll();
+
+?>
diff --git a/maintenance/importTextFile.inc b/maintenance/importTextFile.inc
new file mode 100644
index 00000000..50b936c1
--- /dev/null
+++ b/maintenance/importTextFile.inc
@@ -0,0 +1,75 @@
+<?php
+
+/**
+ * Support functions for the importTextFile script
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ */
+
+require_once( "$IP/includes/RecentChange.php" );
+
+/**
+ * Insert a new article
+ *
+ * @param $title Title of the article
+ * @param $text Text of the article
+ * @param $user User associated with the edit
+ * @param $comment Edit summary
+ * @param $rc Whether or not to add a recent changes event
+ * @return bool
+ */
+function insertNewArticle( &$title, $text, &$user, $comment, $rc ) {
+ if( !$title->exists() ) {
+ # Create the article
+ $dbw =& wfGetDB( DB_MASTER );
+ $dbw->immediateBegin();
+ $article = new Article( $title );
+ $articleId = $article->insertOn( $dbw );
+ # Prepare and save associated revision
+ $revision = new Revision( array( 'page' => $articleId, 'text' => $text, 'user' => $user->mId, 'user_text' => $user->getName(), 'comment' => $comment ) );
+ $revisionId = $revision->insertOn( $dbw );
+ # Make it the current revision
+ $article->updateRevisionOn( $dbw, $revision );
+ $dbw->immediateCommit();
+ # Update recent changes if appropriate
+ if( $rc )
+ updateRecentChanges( $dbw, $title, $user, $comment, strlen( $text ), $articleId );
+ # Touch links etc.
+ Article::onArticleCreate( $title );
+ $article->editUpdates( $text, $comment, false, $dbw->timestamp(), $revisionId );
+ return true;
+ } else {
+ # Title exists; touch nothing
+ return false;
+ }
+}
+
+/**
+ * Turn a filename into a title
+ *
+ * @param $filename Filename to be transformed
+ * @return Title
+ */
+function titleFromFilename( $filename ) {
+ $parts = explode( '/', $filename );
+ $parts = explode( '.', $parts[ count( $parts ) - 1 ] );
+ return Title::newFromText( $parts[0] );
+}
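+
+# For example (hypothetical path): titleFromFilename( '/tmp/Main_Page.txt' )
+# takes the last path component, strips the extension, and returns the Title
+# for 'Main_Page', i.e. the page "Main Page".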
+
+/**
+ * Update recent changes with the page creation event
+ *
+ * @param $dbw Database in use
+ * @param $title Title of the new page
+ * @param $user User responsible for the creation
+ * @param $comment Edit summary associated with the edit
+ * @param $size Size of the page
+ * @param $articleId Article identifier
+ */
+function updateRecentChanges( &$dbw, &$title, &$user, $comment, $size, $articleId ) {
+ RecentChange::notifyNew( $dbw->timestamp(), $title, false, $user, $comment, 'default', '', $size, $articleId );
+}
+
+?> \ No newline at end of file
diff --git a/maintenance/importTextFile.php b/maintenance/importTextFile.php
new file mode 100644
index 00000000..625763be
--- /dev/null
+++ b/maintenance/importTextFile.php
@@ -0,0 +1,111 @@
+<?php
+
+/**
+ * Maintenance script to insert an article, importing text from a file
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ */
+
+$options = array( 'help', 'norc' );
+$optionsWithArgs = array( 'title', 'user', 'comment' );
+require_once( 'commandLine.inc' );
+require_once( 'importTextFile.inc' );
+echo( "Import Text File\n\n" );
+
+if( !isset( $options['help'] ) || !$options['help'] ) {
+
+ # Check file existence
+ $filename = $args[0];
+ echo( "Using file '{$filename}'..." );
+ if( file_exists( $filename ) ) {
+ echo( "found.\n" );
+
+ # Work out the title for the page
+		if( isset( $options['title'] ) && trim( $options['title'] ) != '' ) {
+ $titleText = $options['title'];
+ # Use the supplied title
+ echo( "Using title '{$titleText}'..." );
+ $title = Title::newFromText( $options['title'] );
+ } else {
+ # Attempt to make a title out of the filename
+ echo( "Using title from filename..." );
+ $title = titleFromFilename( $filename );
+ }
+
+ # Check the title's valid
+ if( !is_null( $title ) && is_object( $title ) ) {
+ echo( "ok.\n" );
+
+ # Read in the text
+ $text = file_get_contents( $filename );
+
+ # Check the supplied user and fall back to a default if needed
+ if( isset( $options['user'] ) && trim( $options['user'] ) != '' ) {
+ $username = $options['user'];
+ } else {
+ $username = 'MediaWiki default';
+ }
+ echo( "Using user '{$username}'..." );
+ $user = User::newFromName( $username );
+
+ # Check the user's valid
+ if( !is_null( $user ) && is_object( $user ) ) {
+ echo( "ok.\n" );
+ $wgUser =& $user;
+
+				# If a comment was supplied, use it (underscores become spaces); else use a default
+				if( isset( $options['comment'] ) && trim( $options['comment'] ) != '' ) {
+ $comment = str_replace( '_', ' ', $options['comment'] );
+ } else {
+ $comment = 'Importing text file';
+ }
+ echo( "Using edit summary '{$comment}'.\n" );
+
+ # Do we need to update recent changes?
+ if( isset( $options['norc'] ) && $options['norc'] ) {
+ $rc = false;
+ } else {
+ $rc = true;
+ }
+
+ # Attempt the insertion
+ echo( "Attempting to insert page..." );
+ $success = insertNewArticle( $title, $text, $user, $comment, $rc );
+ if( $success ) {
+ echo( "done.\n" );
+ } else {
+ echo( "failed. Title exists.\n" );
+ }
+
+ } else {
+ # Dud user
+ echo( "invalid username.\n" );
+ }
+
+ } else {
+ # Dud title
+ echo( "invalid title.\n" );
+ }
+
+ } else {
+ # File not found
+ echo( "not found.\n" );
+ }
+
+} else {
+ # Show help
+ echo( "Imports the contents of a text file into a wiki page.\n\n" );
+ echo( "USAGE: php importTextFile.php [--help|--title <title>|--user <user>|--comment <comment>|--norc] <filename>\n\n" );
+ echo( " --help: Show this help information\n" );
+ echo( " --title <title> : Title for the new page; if not supplied, the filename is used as a base for the title\n" );
+ echo( " --user <user> : User to be associated with the edit; if not supplied, a default is used\n" );
+ echo( "--comment <comment> : Edit summary to be associated with the edit; underscores are transformed into spaces; if not supplied, a default is used\n" );
+ echo( " <filename> : Path to the file containing the wikitext to import\n" );
+ echo( " --norc : Do not add a page creation event to recent changes\n" );
+
+}
+echo( "\n" );
+
+?> \ No newline at end of file
diff --git a/maintenance/importUseModWiki.php b/maintenance/importUseModWiki.php
new file mode 100644
index 00000000..15f5e444
--- /dev/null
+++ b/maintenance/importUseModWiki.php
@@ -0,0 +1,365 @@
+<?php
+
+/**
+ * Import data from a UseModWiki into a MediaWiki wiki
+ * 2003-02-09 Brion VIBBER <brion@pobox.com>
+ * Based loosely on Magnus's code from 2001-2002
+ *
+ * Updated limited version to get something working temporarily
+ * 2003-10-09
+ * Be sure to run the link & index rebuilding scripts!
+ *
+ * Some more munging for charsets etc
+ * 2003-11-28
+ *
+ * Partial fix for pages starting with lowercase letters (??)
+ * and CamelCase and /Subpage link conversion
+ * 2004-11-17
+ *
+ * Rewrite output to create Special:Export format for import
+ * instead of raw SQL. Should be 'future-proof' against
+ * schema changes.
+ * 2005-03-14
+ *
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+if( php_sapi_name() != 'cli' ) {
+ echo "Please customize the settings and run me from the command line.";
+ die( -1 );
+}
+
+/** Set these correctly! */
+$wgImportEncoding = "CP1252"; /* We convert all to UTF-8 */
+$wgRootDirectory = "/kalman/Projects/wiki2002/wiki/lib-http/db/wiki";
+
+/* On a large wiki, you might run out of memory */
+@ini_set( 'memory_limit', '40M' );
+
+/* globals */
+$wgFieldSeparator = "\xb3"; # Some wikis may use a different character
+$FS  = $wgFieldSeparator;
+$FS1 = $FS . "1";
+$FS2 = $FS . "2";
+$FS3 = $FS . "3";
+
+# Unicode sanitization tools
+require_once( '../includes/normal/UtfNormal.php' );
+
+$usercache = array();
+
+importPages();
+
+# ------------------------------------------------------------------------------
+
+function importPages()
+{
+ global $wgRootDirectory;
+
+ $gt = '>';
+ echo <<<END
+<?xml version="1.0" encoding="UTF-8" ?$gt
+<mediawiki xmlns="http://www.mediawiki.org/xml/export-0.1/"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://www.mediawiki.org/xml/export-0.1/
+ http://www.mediawiki.org/xml/export-0.1.xsd"
+ version="0.1"
+ xml:lang="en">
+<!-- generated by importUseModWiki.php -->
+
+END;
+ $letters = array(
+ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I',
+ 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R',
+ 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 'other' );
+ foreach( $letters as $letter ) {
+ $dir = "$wgRootDirectory/page/$letter";
+ if( is_dir( $dir ) )
+ importPageDirectory( $dir );
+ }
+ echo <<<END
+</mediawiki>
+
+END;
+}
+
+function importPageDirectory( $dir, $prefix = "" )
+{
+ echo "\n<!-- Checking page directory " . xmlCommentSafe( $dir ) . " -->\n";
+ $mydir = opendir( $dir );
+ while( $entry = readdir( $mydir ) ) {
+ if( preg_match( '/^(.+)\.db$/', $entry, $m ) ) {
+ echo importPage( $prefix . $m[1] );
+ } else {
+ if( is_dir( "$dir/$entry" ) ) {
+ if( $entry != '.' && $entry != '..' ) {
+ importPageDirectory( "$dir/$entry", "$entry/" );
+ }
+ } else {
+ echo "<!-- File '" . xmlCommentSafe( $entry ) . "' doesn't seem to contain an article. Skipping. -->\n";
+ }
+ }
+ }
+}
+
+
+# ------------------------------------------------------------------------------
+
+/* fetch_ functions
+ Grab a given item from the database
+ */
+
+function useModFilename( $title ) {
+ $c = substr( $title, 0, 1 );
+ if(preg_match( '/[A-Z]/i', $c ) ) {
+ return strtoupper( $c ) . "/$title";
+ }
+ return "other/$title";
+}
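+
+# For example, useModFilename( 'HomePage' ) returns 'H/HomePage', while a
+# title starting with a non-letter, such as '1939', maps to 'other/1939'.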
+
+function fetchPage( $title )
+{
+ global $FS,$FS1,$FS2,$FS3, $wgRootDirectory;
+
+ $fname = $wgRootDirectory . "/page/" . useModFilename( $title ) . ".db";
+ if( !file_exists( $fname ) ) {
+ echo "Couldn't open file '$fname' for page '$title'.\n";
+ die( -1 );
+ }
+
+ $page = splitHash( $FS1, file_get_contents( $fname ) );
+ $section = splitHash( $FS2, $page["text_default"] );
+ $text = splitHash( $FS3, $section["data"] );
+
+ return array2object( array( "text" => $text["text"] , "summary" => $text["summary"] ,
+ "minor" => $text["minor"] , "ts" => $section["ts"] ,
+ "username" => $section["username"] , "host" => $section["host"] ) );
+}
+
+function fetchKeptPages( $title )
+{
+ global $FS,$FS1,$FS2,$FS3, $wgRootDirectory, $wgTimezoneCorrection;
+
+ $fname = $wgRootDirectory . "/keep/" . useModFilename( $title ) . ".kp";
+ if( !file_exists( $fname ) ) return array();
+
+ $keptlist = explode( $FS1, file_get_contents( $fname ) );
+ array_shift( $keptlist ); # Drop the junk at beginning of file
+
+ $revisions = array();
+ foreach( $keptlist as $rev ) {
+ $section = splitHash( $FS2, $rev );
+ $text = splitHash( $FS3, $section["data"] );
+ if ( $text["text"] && $text["minor"] != "" && ( $section["ts"]*1 > 0 ) ) {
+ array_push( $revisions, array2object( array ( "text" => $text["text"] , "summary" => $text["summary"] ,
+ "minor" => $text["minor"] , "ts" => $section["ts"] ,
+ "username" => $section["username"] , "host" => $section["host"] ) ) );
+ } else {
+ echo "<!-- skipped a bad old revision -->\n";
+ }
+ }
+ return $revisions;
+}
+
+function splitHash( $sep, $str ) {
+	$temp = explode( $sep, $str );
+	$ret = array();
+	for ( $i = 0; $i + 1 < count( $temp ); $i++ ) {
+		$ret[$temp[$i]] = $temp[++$i];
+	}
+	return $ret;
+}
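+
+/* A sketch of the on-disk format splitHash() parses: one string of
+ * alternating keys and values. With the separator written as '|' for
+ * readability:
+ *   splitHash( '|', 'ts|1068992846|username|Brion' )
+ * returns array( 'ts' => '1068992846', 'username' => 'Brion' ).
+ */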
+
+
+/* import_ functions
+ Take a fetched item and produce SQL
+ */
+
+function checkUserCache( $name, $host )
+{
+ global $usercache;
+
+ if( $name ) {
+		if( isset( $usercache[$name] ) ) { # cache maps name => id, so test the key
+ $userid = $usercache[$name];
+ } else {
+ # If we haven't imported user accounts
+ $userid = 0;
+ }
+ $username = str_replace( '_', ' ', $name );
+ } else {
+ $userid = 0;
+ $username = $host;
+ }
+ return array( $userid, $username );
+}
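+
+# For example (illustrative values): checkUserCache( 'Brion_VIBBER', '127.0.0.1' )
+# returns array( 0, 'Brion VIBBER' ) while the cache is empty, and an anonymous
+# edit checkUserCache( '', 'ns.example.com' ) returns array( 0, 'ns.example.com' ).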
+
+function importPage( $title )
+{
+ global $usercache;
+
+ echo "\n<!-- Importing page " . xmlCommentSafe( $title ) . " -->\n";
+ $page = fetchPage( $title );
+
+ $newtitle = xmlsafe( str_replace( '_', ' ', recodeText( $title ) ) );
+
+ $munged = mungeFormat( $page->text );
+ if( $munged != $page->text ) {
+ /**
+ * Save a *new* revision with the conversion, and put the
+ * previous last version into the history.
+ */
+ $next = array2object( array(
+ 'text' => $munged,
+ 'minor' => 1,
+ 'username' => 'Conversion script',
+ 'host' => '127.0.0.1',
+ 'ts' => time(),
+ 'summary' => 'link fix',
+ ) );
+ $revisions = array( $page, $next );
+ } else {
+ /**
+ * Current revision:
+ */
+ $revisions = array( $page );
+ }
+ $xml = <<<END
+ <page>
+ <title>$newtitle</title>
+
+END;
+
+ # History
+ $revisions = array_merge( $revisions, fetchKeptPages( $title ) );
+	if( count( $revisions ) == 0 ) {
+		# Nothing usable for this page; the old code's "return $sql" was a
+		# leftover from the SQL-emitting version of this script
+		return '';
+	}
+
+ foreach( $revisions as $rev ) {
+ $text = xmlsafe( recodeText( $rev->text ) );
+ $minor = ($rev->minor ? '<minor/>' : '');
+ list( $userid, $username ) = checkUserCache( $rev->username, $rev->host );
+ $username = xmlsafe( recodeText( $username ) );
+ $timestamp = xmlsafe( timestamp2ISO8601( $rev->ts ) );
+ $comment = xmlsafe( recodeText( $rev->summary ) );
+
+ $xml .= <<<END
+ <revision>
+ <timestamp>$timestamp</timestamp>
+ <contributor><username>$username</username></contributor>
+ $minor
+ <comment>$comment</comment>
+ <text>$text</text>
+ </revision>
+
+END;
+ }
+ $xml .= "</page>\n\n";
+ return $xml;
+}
+
+# Whee!
+function recodeText( $string ) {
+ global $wgImportEncoding;
+ # For currently latin-1 wikis
+ $string = str_replace( "\r\n", "\n", $string );
+ $string = @iconv( $wgImportEncoding, "UTF-8", $string );
+ $string = wfMungeToUtf8( $string ); # Any old &#1234; stuff
+ return $string;
+}
+
+function wfUtf8Sequence($codepoint) {
+ if($codepoint < 0x80) return chr($codepoint);
+ if($codepoint < 0x800) return chr($codepoint >> 6 & 0x3f | 0xc0) .
+ chr($codepoint & 0x3f | 0x80);
+ if($codepoint < 0x10000) return chr($codepoint >> 12 & 0x0f | 0xe0) .
+ chr($codepoint >> 6 & 0x3f | 0x80) .
+ chr($codepoint & 0x3f | 0x80);
+ if($codepoint < 0x100000) return chr($codepoint >> 18 & 0x07 | 0xf0) . # Double-check this
+ chr($codepoint >> 12 & 0x3f | 0x80) .
+ chr($codepoint >> 6 & 0x3f | 0x80) .
+ chr($codepoint & 0x3f | 0x80);
+ # Doesn't yet handle outside the BMP
+ return "&#$codepoint;";
+}
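+
+# For example, wfUtf8Sequence( 0xE9 ) returns "\xc3\xa9", the two-byte UTF-8
+# encoding of U+00E9 ('é').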
+
+function wfMungeToUtf8($string) {
+ $string = preg_replace ( '/&#([0-9]+);/e', 'wfUtf8Sequence($1)', $string );
+ $string = preg_replace ( '/&#x([0-9a-f]+);/ie', 'wfUtf8Sequence(0x$1)', $string );
+ # Should also do named entities here
+ return $string;
+}
+
+function timestamp2ISO8601( $ts ) {
+ #2003-08-05T18:30:02Z
+ return gmdate( 'Y-m-d', $ts ) . 'T' . gmdate( 'H:i:s', $ts ) . 'Z';
+}
+
+function xmlsafe( $string ) {
+ /**
+ * The page may contain old data which has not been properly normalized.
+ * Invalid UTF-8 sequences or forbidden control characters will make our
+ * XML output invalid, so be sure to strip them out.
+ */
+ $string = UtfNormal::cleanUp( $string );
+
+ $string = htmlspecialchars( $string );
+ return $string;
+}
+
+function xmlCommentSafe( $text ) {
+ return str_replace( '--', '\\-\\-', xmlsafe( recodeText( $text ) ) );
+}
+
+
+function array2object( $arr ) {
+	$o = new stdClass;
+ foreach( $arr as $x => $y ) {
+ $o->$x = $y;
+ }
+ return $o;
+}
+
+
+/**
+ * Make CamelCase and /Talk links work
+ */
+function mungeFormat( $text ) {
+ global $nowiki;
+ $nowiki = array();
+ $staged = preg_replace_callback(
+ '/(<nowiki>.*?<\\/nowiki>|(?:http|https|ftp):\\S+|\[\[[^]\\n]+]])/s',
+ 'nowikiPlaceholder', $text );
+
+ # This is probably not 100% correct, I'm just
+ # glancing at the UseModWiki code.
+ $upper = "[A-Z]";
+ $lower = "[a-z_0-9]";
+ $any = "[A-Za-z_0-9]";
+ $camel = "(?:$upper+$lower+$upper+$any*)";
+ $subpage = "(?:\\/$any+)";
+ $substart = "(?:\\/$upper$any*)";
+
+ $munged = preg_replace( "/(?!\\[\\[)($camel$subpage*|$substart$subpage*)\\b(?!\\]\\]|>)/",
+ '[[$1]]', $staged );
+
+ $final = preg_replace( '/' . preg_quote( placeholder() ) . '/es',
+ 'array_shift( $nowiki )', $munged );
+ return $final;
+}
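+
+/* A rough example of the rewrite (assuming default UseModWiki linking rules):
+ *   mungeFormat( 'See SandBox and /Archive for details' )
+ * yields 'See [[SandBox]] and [[/Archive]] for details'; text already inside
+ * [[...]], <nowiki> sections, and bare URLs is protected by the placeholders.
+ */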
+
+
+function placeholder( $x = null ) {
+ return '\xffplaceholder\xff';
+}
+
+function nowikiPlaceholder( $matches ) {
+ global $nowiki;
+ $nowiki[] = $matches[1];
+ return placeholder();
+}
+
+?>
diff --git a/maintenance/initStats.php b/maintenance/initStats.php
new file mode 100644
index 00000000..b622c3f0
--- /dev/null
+++ b/maintenance/initStats.php
@@ -0,0 +1,78 @@
+<?php
+
+/**
+ * Maintenance script to re-initialise or update the site statistics table
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Brion Vibber
+ * @author Rob Church <robchur@gmail.com>
+ * @licence GNU General Public Licence 2.0 or later
+ */
+
+$options = array( 'help', 'update', 'noviews' );
+require_once( 'commandLine.inc' );
+echo( "Refresh Site Statistics\n\n" );
+$dbr =& wfGetDB( DB_SLAVE );
+$fname = 'initStats';
+
+if( isset( $options['help'] ) ) {
+ showHelp();
+ exit();
+}
+
+echo( "Counting total edits..." );
+$edits = $dbr->selectField( 'revision', 'COUNT(*)', '', $fname );
+echo( "{$edits}\nCounting number of articles..." );
+
+global $wgContentNamespaces;
+$good = $dbr->selectField( 'page', 'COUNT(*)', array( 'page_namespace' => $wgContentNamespaces, 'page_is_redirect' => 0, 'page_len > 0' ), $fname );
+echo( "{$good}\nCounting total pages..." );
+
+$pages = $dbr->selectField( 'page', 'COUNT(*)', '', $fname );
+echo( "{$pages}\nCounting number of users..." );
+
+$users = $dbr->selectField( 'user', 'COUNT(*)', '', $fname );
+echo( "{$users}\nCounting number of admins..." );
+
+$admin = $dbr->selectField( 'user_groups', 'COUNT(*)', array( 'ug_group' => 'sysop' ), $fname );
+echo( "{$admin}\nCounting number of images..." );
+
+$image = $dbr->selectField( 'image', 'COUNT(*)', '', $fname );
+echo( "{$image}\n" );
+
+if( !isset( $options['noviews'] ) ) {
+ echo( "Counting total page views..." );
+ $views = $dbr->selectField( 'page', 'SUM(page_counter)', '', $fname );
+ echo( "{$views}\n" );
+}
+
+echo( "\nUpdating site statistics..." );
+
+$dbw =& wfGetDB( DB_MASTER );
+$values = array( 'ss_total_edits' => $edits,
+ 'ss_good_articles' => $good,
+ 'ss_total_pages' => $pages,
+ 'ss_users' => $users,
+ 'ss_admins' => $admin,
+ 'ss_images' => $image );
+$conds = array( 'ss_row_id' => 1 );
+$views = array( 'ss_total_views' => isset( $views ) ? $views : 0 );
+
+if( isset( $options['update'] ) ) {
+ $dbw->update( 'site_stats', $values, $conds, $fname );
+} else {
+ $dbw->delete( 'site_stats', $conds, $fname );
+ $dbw->insert( 'site_stats', array_merge( $values, $conds, $views ), $fname );
+}
+
+echo( "done.\n\n" );
+
+function showHelp() {
+ echo( "Re-initialise the site statistics tables.\n\n" );
+ echo( "Usage: php initStats.php [--update|--noviews]\n\n" );
+ echo( " --update : Update the existing statistics (preserves the ss_total_views field)\n" );
+ echo( "--noviews : Don't update the page view counter\n\n" );
+}
+
+?> \ No newline at end of file
diff --git a/maintenance/interwiki.sql b/maintenance/interwiki.sql
new file mode 100644
index 00000000..ca656e46
--- /dev/null
+++ b/maintenance/interwiki.sql
@@ -0,0 +1,179 @@
+-- Based more or less on the public interwiki map from MeatballWiki
+-- Default interwiki prefixes...
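+--
+-- Each iw_url contains a $1 placeholder that is replaced by the target page
+-- name at link time; for example, the 'meatball' row below turns
+-- [[meatball:WikiPedia]] into http://www.usemod.com/cgi-bin/mb.pl?WikiPedia
+-- (an illustrative sketch; iw_local=1 marks sites treated as part of the
+-- local wiki family).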
+
+REPLACE INTO /*$wgDBprefix*/interwiki (iw_prefix,iw_url,iw_local) VALUES
+('abbenormal','http://www.ourpla.net/cgi-bin/pikie.cgi?$1',0),
+('acadwiki','http://xarch.tu-graz.ac.at/autocad/wiki/$1',0),
+('acronym','http://www.acronymfinder.com/af-query.asp?String=exact&Acronym=$1',0),
+('advogato','http://www.advogato.org/$1',0),
+('aiwiki','http://www.ifi.unizh.ch/ailab/aiwiki/aiw.cgi?$1',0),
+('alife','http://news.alife.org/wiki/index.php?$1',0),
+('annotation','http://bayle.stanford.edu/crit/nph-med.cgi/$1',0),
+('annotationwiki','http://www.seedwiki.com/page.cfm?wikiid=368&doc=$1',0),
+('arxiv','http://www.arxiv.org/abs/$1',0),
+('aspienetwiki','http://aspie.mela.de/Wiki/index.php?title=$1',0),
+('bemi','http://bemi.free.fr/vikio/index.php?$1',0),
+('benefitswiki','http://www.benefitslink.com/cgi-bin/wiki.cgi?$1',0),
+('brasilwiki','http://rio.ifi.unizh.ch/brasilienwiki/index.php/$1',0),
+('bridgeswiki','http://c2.com/w2/bridges/$1',0),
+('c2find','http://c2.com/cgi/wiki?FindPage&value=$1',0),
+('cache','http://www.google.com/search?q=cache:$1',0),
+('ciscavate','http://ciscavate.org/index.php/$1',0),
+('cliki','http://ww.telent.net/cliki/$1',0),
+('cmwiki','http://www.ourpla.net/cgi-bin/wiki.pl?$1',0),
+('codersbase','http://www.codersbase.com/$1',0),
+('commons','http://commons.wikimedia.org/wiki/$1',0),
+('consciousness','http://teadvus.inspiral.org/',0),
+('corpknowpedia','http://corpknowpedia.org/wiki/index.php/$1',0),
+('creationmatters','http://www.ourpla.net/cgi-bin/wiki.pl?$1',0),
+('dejanews','http://www.deja.com/=dnc/getdoc.xp?AN=$1',0),
+('demokraatia','http://wiki.demokraatia.ee/',0),
+('dictionary','http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=$1',0),
+('disinfopedia','http://www.disinfopedia.org/wiki.phtml?title=$1',0),
+('diveintoosx','http://diveintoosx.org/$1',0),
+('docbook','http://docbook.org/wiki/moin.cgi/$1',0),
+('dolphinwiki','http://www.object-arts.com/wiki/html/Dolphin/$1',0),
+('drumcorpswiki','http://www.drumcorpswiki.com/index.php/$1',0),
+('dwjwiki','http://www.suberic.net/cgi-bin/dwj/wiki.cgi?$1',0),
+('eĉei','http://www.ikso.net/cgi-bin/wiki.pl?$1',0),
+('echei','http://www.ikso.net/cgi-bin/wiki.pl?$1',0),
+('ecxei','http://www.ikso.net/cgi-bin/wiki.pl?$1',0),
+('efnetceewiki','http://purl.net/wiki/c/$1',0),
+('efnetcppwiki','http://purl.net/wiki/cpp/$1',0),
+('efnetpythonwiki','http://purl.net/wiki/python/$1',0),
+('efnetxmlwiki','http://purl.net/wiki/xml/$1',0),
+('eljwiki','http://elj.sourceforge.net/phpwiki/index.php/$1',0),
+('emacswiki','http://www.emacswiki.org/cgi-bin/wiki.pl?$1',0),
+('elibre','http://enciclopedia.us.es/index.php/$1',0),
+('eokulturcentro','http://esperanto.toulouse.free.fr/wakka.php?wiki=$1',0),
+('evowiki','http://www.evowiki.org/index.php/$1',0),
+('finalempire','http://final-empire.sourceforge.net/cgi-bin/wiki.pl?$1',0),
+('firstwiki','http://firstwiki.org/index.php/$1',0),
+('foldoc','http://www.foldoc.org/foldoc/foldoc.cgi?$1',0),
+('foxwiki','http://fox.wikis.com/wc.dll?Wiki~$1',0),
+('fr.be','http://fr.wikinations.be/$1',0),
+('fr.ca','http://fr.ca.wikinations.org/$1',0),
+('fr.fr','http://fr.fr.wikinations.org/$1',0),
+('fr.org','http://fr.wikinations.org/$1',0),
+('freebsdman','http://www.FreeBSD.org/cgi/man.cgi?apropos=1&query=$1',0),
+('gamewiki','http://gamewiki.org/wiki/index.php/$1',0),
+('gej','http://www.esperanto.de/cgi-bin/aktivikio/wiki.pl?$1',0),
+('gentoo-wiki','http://gentoo-wiki.com/$1',0),
+('globalvoices','http://cyber.law.harvard.edu/dyn/globalvoices/wiki/$1',0),
+('gmailwiki','http://www.gmailwiki.com/index.php/$1',0),
+('google','http://www.google.com/search?q=$1',0),
+('googlegroups','http://groups.google.com/groups?q=$1',0),
+('gotamac','http://www.got-a-mac.org/$1',0),
+('greencheese','http://www.greencheese.org/$1',0),
+('hammondwiki','http://www.dairiki.org/HammondWiki/index.php3?$1',0),
+('haribeau','http://wiki.haribeau.de/cgi-bin/wiki.pl?$1',0),
+('hewikisource','http://he.wikisource.org/wiki/$1',1),
+('herzkinderwiki','http://www.herzkinderinfo.de/Mediawiki/index.php/$1',0),
+('hrwiki','http://www.hrwiki.org/index.php/$1',0),
+('iawiki','http://www.IAwiki.net/$1',0),
+('imdb','http://us.imdb.com/Title?$1',0),
+('infosecpedia','http://www.infosecpedia.org/pedia/index.php/$1',0),
+('jargonfile','http://sunir.org/apps/meta.pl?wiki=JargonFile&redirect=$1',0),
+('jefo','http://www.esperanto-jeunes.org/vikio/index.php?$1',0),
+('jiniwiki','http://www.cdegroot.com/cgi-bin/jini?$1',0),
+('jspwiki','http://www.ecyrd.com/JSPWiki/Wiki.jsp?page=$1',0),
+('kerimwiki','http://wiki.oxus.net/$1',0),
+('kmwiki','http://www.voght.com/cgi-bin/pywiki?$1',0),
+('knowhow','http://www2.iro.umontreal.ca/~paquetse/cgi-bin/wiki.cgi?$1',0),
+('lanifexwiki','http://opt.lanifex.com/cgi-bin/wiki.pl?$1',0),
+('lasvegaswiki','http://wiki.gmnow.com/index.php/$1',0),
+('linuxwiki','http://www.linuxwiki.de/$1',0),
+('lojban','http://www.lojban.org/tiki/tiki-index.php?page=$1',0),
+('lqwiki','http://wiki.linuxquestions.org/wiki/$1',0),
+('lugkr','http://lug-kr.sourceforge.net/cgi-bin/lugwiki.pl?$1',0),
+('lutherwiki','http://www.lutheranarchives.com/mw/index.php/$1',0),
+('mathsongswiki','http://SeedWiki.com/page.cfm?wikiid=237&doc=$1',0),
+('mbtest','http://www.usemod.com/cgi-bin/mbtest.pl?$1',0),
+('meatball','http://www.usemod.com/cgi-bin/mb.pl?$1',0),
+('mediazilla','http://bugzilla.wikipedia.org/$1',1),
+('memoryalpha','http://www.memory-alpha.org/en/index.php/$1',0),
+('metaweb','http://www.metaweb.com/wiki/wiki.phtml?title=$1',0),
+('metawiki','http://sunir.org/apps/meta.pl?$1',0),
+('metawikipedia','http://meta.wikimedia.org/wiki/$1',0),
+('moinmoin','http://purl.net/wiki/moin/$1',0),
+('mozillawiki','http://wiki.mozilla.org/index.php/$1',0),
+('muweb','http://www.dunstable.com/scripts/MuWebWeb?$1',0),
+('netvillage','http://www.netbros.com/?$1',0),
+('oeis','http://www.research.att.com/cgi-bin/access.cgi/as/njas/sequences/eisA.cgi?Anum=$1',0),
+('openfacts','http://openfacts.berlios.de/index.phtml?title=$1',0),
+('openwiki','http://openwiki.com/?$1',0),
+('opera7wiki','http://nontroppo.org/wiki/$1',0),
+('orgpatterns','http://www.bell-labs.com/cgi-user/OrgPatterns/OrgPatterns?$1',0),
+('osi reference model','http://wiki.tigma.ee/',0),
+('pangalacticorg','http://www.pangalactic.org/Wiki/$1',0),
+('personaltelco','http://www.personaltelco.net/index.cgi/$1',0),
+('patwiki','http://gauss.ffii.org/$1',0),
+('phpwiki','http://phpwiki.sourceforge.net/phpwiki/index.php?$1',0),
+('pikie','http://pikie.darktech.org/cgi/pikie?$1',0),
+('pmeg','http://www.bertilow.com/pmeg/$1.php',0),
+('ppr','http://c2.com/cgi/wiki?$1',0),
+('purlnet','http://purl.oclc.org/NET/$1',0),
+('pythoninfo','http://www.python.org/cgi-bin/moinmoin/$1',0),
+('pythonwiki','http://www.pythonwiki.de/$1',0),
+('pywiki','http://www.voght.com/cgi-bin/pywiki?$1',0),
+('raec','http://www.raec.clacso.edu.ar:8080/raec/Members/raecpedia/$1',0),
+('revo','http://purl.org/NET/voko/revo/art/$1.html',0),
+('rfc','http://www.rfc-editor.org/rfc/rfc$1.txt',0),
+('s23wiki','http://is-root.de/wiki/index.php/$1',0),
+('scoutpedia','http://www.scoutpedia.info/index.php/$1',0),
+('seapig','http://www.seapig.org/$1',0),
+('seattlewiki','http://seattlewiki.org/wiki/$1',0),
+('seattlewireless','http://seattlewireless.net/?$1',0),
+('seeds','http://www.IslandSeeds.org/wiki/$1',0),
+('senseislibrary','http://senseis.xmp.net/?$1',0),
+('shakti','http://cgi.algonet.se/htbin/cgiwrap/pgd/ShaktiWiki/$1',0),
+('slashdot','http://slashdot.org/article.pl?sid=$1',0),
+('smikipedia','http://www.smikipedia.org/$1',0),
+('sockwiki','http://wiki.socklabs.com/$1',0),
+('sourceforge','http://sourceforge.net/$1',0),
+('squeak','http://minnow.cc.gatech.edu/squeak/$1',0),
+('strikiwiki','http://ch.twi.tudelft.nl/~mostert/striki/teststriki.pl?$1',0),
+('susning','http://www.susning.nu/$1',0),
+('svgwiki','http://www.protocol7.com/svg-wiki/default.asp?$1',0),
+('tavi','http://tavi.sourceforge.net/$1',0),
+('tejo','http://www.tejo.org/vikio/$1',0),
+('terrorwiki','http://www.liberalsagainstterrorism.com/wiki/index.php/$1',0),
+('tmbw','http://www.tmbw.net/wiki/index.php/$1',0),
+('tmnet','http://www.technomanifestos.net/?$1',0),
+('tmwiki','http://www.EasyTopicMaps.com/?page=$1',0),
+('turismo','http://www.tejo.org/turismo/$1',0),
+('theopedia','http://www.theopedia.com/$1',0),
+('twiki','http://twiki.org/cgi-bin/view/$1',0),
+('twistedwiki','http://purl.net/wiki/twisted/$1',0),
+('uea','http://www.tejo.org/uea/$1',0),
+('unreal','http://wiki.beyondunreal.com/wiki/$1',0),
+('ursine','http://ursine.ca/$1',0),
+('usej','http://www.tejo.org/usej/$1',0),
+('usemod','http://www.usemod.com/cgi-bin/wiki.pl?$1',0),
+('visualworks','http://wiki.cs.uiuc.edu/VisualWorks/$1',0),
+('warpedview','http://www.warpedview.com/index.php/$1',0),
+('webdevwikinl','http://www.promo-it.nl/WebDevWiki/index.php?page=$1',0),
+('webisodes','http://www.webisodes.org/$1',0),
+('webseitzwiki','http://webseitz.fluxent.com/wiki/$1',0),
+('why','http://clublet.com/c/c/why?$1',0),
+('wiki','http://c2.com/cgi/wiki?$1',0),
+('wikia','http://www.wikia.com/wiki/index.php/$1',0),
+('wikibooks','http://en.wikibooks.org/wiki/$1',1),
+('wikicities','http://www.wikicities.com/index.php/$1',0),
+('wikif1','http://www.wikif1.org/$1',0),
+('wikinfo','http://www.wikinfo.org/wiki.php?title=$1',0),
+('wikimedia','http://wikimediafoundation.org/wiki/$1',0),
+('wikiquote','http://en.wikiquote.org/wiki/$1',1),
+('wikinews','http://en.wikinews.org/wiki/$1',0),
+('wikisource','http://sources.wikipedia.org/wiki/$1',1),
+('wikispecies','http://species.wikipedia.org/wiki/$1',1),
+('wikitravel','http://wikitravel.org/en/$1',0),
+('wikiworld','http://WikiWorld.com/wiki/index.php/$1',0),
+('wiktionary','http://en.wiktionary.org/wiki/$1',1),
+('wlug','http://www.wlug.org.nz/$1',0),
+('wlwiki','http://winslowslair.supremepixels.net/wiki/index.php/$1',0),
+('ypsieyeball','http://sknkwrks.dyndns.org:1957/writewiki/wiki.pl?$1',0),
+('zwiki','http://www.zwiki.org/$1',0),
+('zzz wiki','http://wiki.zzz.ee/',0),
+('wikt','http://en.wiktionary.org/wiki/$1',1);
+
diff --git a/maintenance/lang2po.php b/maintenance/lang2po.php
new file mode 100644
index 00000000..af6bceea
--- /dev/null
+++ b/maintenance/lang2po.php
@@ -0,0 +1,154 @@
+<?php
+/**
+ * Convert Language files to .po files!
+ *
+ * Todo:
+ * - generate .po header
+ * - fix escaping of \
+ */
+
+/** This is a command line script */
+require_once('commandLine.inc');
+require_once('languages.inc');
+
+define('ALL_LANGUAGES', true);
+define('XGETTEXT_BIN', 'xgettext');
+define('MSGMERGE_BIN', 'msgmerge');
+
+// used to generate the .pot
+define('XGETTEXT_OPTIONS', '-n --keyword=wfMsg --keyword=wfMsgForContent --keyword=wfMsgHtml --keyword=wfMsgWikiHtml ');
+define('MSGMERGE_OPTIONS', ' -v ');
+
+define('LOCALE_OUTPUT_DIR', $IP.'/locale');
+
+
+if( isset($options['help']) ) { usage(); wfDie(); }
+// Process every language unless one was requested with --lang
+if( !isset($options['lang']) ) { $options['lang'] = ALL_LANGUAGES; }
+
+function usage() {
+print <<<END
+Usage: php lang2po.php [--help] [--lang=<langcode>] [--stdout]
+ --help: this message.
+ --lang: a lang code you want to generate a .po for (default: all languages).
+
+END;
+}
+
+
+/**
+ * Return a dummy header for later edition.
+ * @return string A dummy header
+ */
+function poHeader() {
+return
+'# SOME DESCRIPTIVE TITLE.
+# Copyright (C) 2005 MediaWiki
+# This file is distributed under the same license as the MediaWiki package.
+# FIRST AUTHOR <EMAIL@ADDRESS>, YEAR.
+#
+#, fuzzy
+msgid ""
+msgstr ""
+"Project-Id-Version: PACKAGE VERSION\n"
+"Report-Msgid-Bugs-To: bugzilllaaaaa\n"
+"POT-Creation-Date: 2005-08-16 20:13+0200\n"
+"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n"
+"Last-Translator: VARIOUS <nobody>\n"
+"Language-Team: LANGUAGE <nobody>\n"
+"MIME-Version: 1.0\n"
+"Content-Type: text/plain; charset=UTF-8\n"
+"Content-Transfer-Encoding: 8bit\n"
+';
+}
+
+/**
+ * generate and write a file in .po format.
+ *
+ * @param string $langcode Code of a language it will process.
+ * @param array &$messages Array containing the various messages.
+ * @return string Filename where stuff got saved or false.
+ */
+function generatePo($langcode, &$messages) {
+ $data = poHeader();
+
+ // Generate .po entries
+ foreach($messages as $identifier => $content) {
+ $data .= "msgid \"$identifier\"\n";
+
+ // Escape backslashes
+ $tmp = str_replace('\\', '\\\\', $content);
+ // Escape doublelquotes
+ $tmp = preg_replace( "/(?<!\\\\)\"/", '\"', $tmp);
+ // Rewrite multilines to gettext format
+ $tmp = str_replace("\n", "\"\n\"", $tmp);
+
+ $data .= 'msgstr "'. $tmp . "\"\n\n";
+ }
+
+ // Write the content to a file in locale/XX/messages.po
+ $dir = LOCALE_OUTPUT_DIR.'/'.$langcode;
+ if( !is_dir($dir) ) { mkdir( $dir, 0770 ); }
+ $filename = $dir.'/fromlanguagefile.po';
+
+ $file = fopen( $filename , 'wb' );
+ if( fwrite( $file, $data ) ) {
+ fclose( $file );
+ return $filename;
+ } else {
+ fclose( $file );
+ return false;
+ }
+}
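+
+/**
+ * Sketch of a resulting entry (message name and text are illustrative only):
+ *
+ *   msgid "tog-preview"
+ *   msgstr "Show \"preview\" box"
+ *
+ * Multi-line messages are continued as consecutive quoted lines, per the
+ * gettext format.
+ */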
+
+function generatePot() {
+ global $IP;
+ $curdir = getcwd();
+ chdir($IP);
+ exec( XGETTEXT_BIN
+ .' '.XGETTEXT_OPTIONS
+ .' -o '.LOCALE_OUTPUT_DIR.'/wfMsg.pot'
+ .' includes/*php'
+ );
+ chdir($curdir);
+}
+
+function applyPot($langcode) {
+ $langdir = LOCALE_OUTPUT_DIR.'/'.$langcode;
+
+ $from = $langdir.'/fromlanguagefile.po';
+ $pot = LOCALE_OUTPUT_DIR.'/wfMsg.pot';
+ $dest = $langdir.'/messages.po';
+
+ // Merge template and generate file to get final .po
+ exec(MSGMERGE_BIN.MSGMERGE_OPTIONS." $from $pot -o $dest ");
+ // delete no more needed file
+// unlink($from);
+}
+
+// Generate a template .pot based on source tree
+echo "Getting 'gettext' default messages from sources:";
+generatePot();
+echo "done.\n";
+
+
+$langTool = new languages();
+
+// Do all languages, or just the one requested with --lang
+$langcodes = ( $options['lang'] === ALL_LANGUAGES )
+	? $langTool->getList()
+	: array( $options['lang'] );
+foreach ( $langcodes as $langcode ) {
+ echo "Loading messages for $langcode:\t";
+ require_once( 'languages/Language' . $langcode . '.php' );
+ $arr = 'wgAllMessages'.$langcode;
+ if(!@is_array($$arr)) {
+ echo "NONE FOUND\n";
+ } else {
+ echo "ok\n";
+ if( ! generatePo($langcode, $$arr) ) {
+ echo "ERROR: Failed to wrote file.\n";
+ } else {
+ echo "Applying template:";
+ applyPot($langcode);
+ }
+ }
+}
+?>
diff --git a/maintenance/langmemusage.php b/maintenance/langmemusage.php
new file mode 100644
index 00000000..d45de0e4
--- /dev/null
+++ b/maintenance/langmemusage.php
@@ -0,0 +1,30 @@
+<?php
+/**
+ * Dumb program that tries to get the memory usage
+ * for each language file.
+ */
+
+/** This is a command line script */
+require_once('commandLine.inc');
+require_once('languages.inc');
+
+$langtool = new languages();
+
+if ( ! function_exists( 'memory_get_usage' ) )
+ wfDie( "You must compile PHP with --enable-memory-limit\n" );
+
+$memlast = $memstart = memory_get_usage();
+
+print 'Base memory usage: '.$memstart."\n";
+
+foreach($langtool->getList() as $langcode) {
+ require_once('languages/Language'.$langcode.'.php');
+ $memstep = memory_get_usage();
+ printf( "%12s: %d\n", $langcode, ($memstep- $memlast) );
+ $memlast = $memstep;
+}
+
+$memend = memory_get_usage();
+
+echo ' Total Usage: '.($memend - $memstart)."\n";
+?>
diff --git a/maintenance/languages.inc b/maintenance/languages.inc
new file mode 100644
index 00000000..e318259d
--- /dev/null
+++ b/maintenance/languages.inc
@@ -0,0 +1,48 @@
+<?php
+/**
+ * Library to grab data from languages files
+ *
+ * WORK IN PROGRESS. There are known bugs when including the same
+ * file multiple times :(((
+ */
+require_once('commandLine.inc');
+
+class languages {
+ /** Contain the list of languages available */
+ var $list = array();
+ /** some messages for the current lang */
+ var $messages = array();
+
+ function languages() {
+ $this->clear();
+ $this->loadList();
+ }
+
+ function clear() {
+ $this->list = array();
+ $this->messages = array();
+ }
+
+ function loadList() {
+ global $IP;
+ $this->list = array();
+
+ // available language files
+ $dir = opendir("$IP/languages");
+ while ($file = readdir($dir)) {
+ if (preg_match("/Language([^.]*?)\.php$/", $file, $m)) {
+ $this->list[] = $m[1];
+ }
+ }
+ sort($this->list);
+
+ // Cleanup file list
+ foreach($this->list as $key => $lang) {
+ if ($lang == 'Utf8' || $lang == '' || $lang == 'Converter')
+ unset($this->list[$key]);
+ }
+ }
+
+ function getList() { return $this->list; }
+}
+?>
diff --git a/maintenance/mcc.php b/maintenance/mcc.php
new file mode 100644
index 00000000..93b6ec18
--- /dev/null
+++ b/maintenance/mcc.php
@@ -0,0 +1,173 @@
+<?php
+/**
+ * memcached diagnostic tool
+ *
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+require_once( 'commandLine.inc' );
+require_once( 'memcached-client.php' );
+
+$mcc = new memcached( array('persistant' => true/*, 'debug' => true*/) );
+$mcc->set_servers( $wgMemCachedServers );
+#$mcc->set_debug( true );
+
+function mccShowHelp($command) {
+
+ if(! $command ) { $command = 'fullhelp'; }
+ $onlyone = true;
+
+ switch ( $command ) {
+
+ case 'fullhelp':
+ // will show help for all commands
+ $onlyone = false;
+
+ case 'get':
+ print "get: grabs something\n";
+ if($onlyone) { break; }
+
+ case 'getsock':
+ print "getsock: lists sockets\n";
+ if($onlyone) { break; }
+
+ case 'set':
+ print "set: changes something\n";
+ if($onlyone) { break; }
+
+ case 'delete':
+ print "delete: deletes something\n";
+ if($onlyone) { break; }
+
+ case 'history':
+ print "history: show command line history\n";
+ if($onlyone) { break; }
+
+ case 'server':
+ print "server: show current memcached server\n";
+ if($onlyone) { break; }
+
+ case 'dumpmcc':
+ print "dumpmcc: shows the whole thing\n";
+ if($onlyone) { break; }
+
+ case 'exit':
+ case 'quit':
+ print "exit or quit: exit mcc\n";
+ if($onlyone) { break; }
+
+ case 'help':
+ print "help: help about a command\n";
+ if($onlyone) { break; }
+
+ default:
+ if($onlyone) {
+ print "$command: command does not exist or no help for it\n";
+ }
+ }
+}
+
+do {
+ $bad = false;
+ $showhelp = false;
+ $quit = false;
+
+ $line = readconsole( '> ' );
+ if ($line === false) exit;
+
+ $args = explode( ' ', $line );
+ $command = array_shift( $args );
+
+ // process command
+ switch ( $command ) {
+ case 'help':
+ // show an help message
+ mccShowHelp(array_shift($args));
+ break;
+
+ case 'get':
+ print "Getting {$args[0]}[{$args[1]}]\n";
+ $res = $mcc->get( $args[0] );
+ if ( array_key_exists( 1, $args ) ) {
+ $res = $res[$args[1]];
+ }
+ if ( $res === false ) {
+ #print 'Error: ' . $mcc->error_string() . "\n";
+ print "MemCached error\n";
+ } elseif ( is_string( $res ) ) {
+ print "$res\n";
+ } else {
+ var_dump( $res );
+ }
+ break;
+
+ case 'getsock':
+ $res = $mcc->get( $args[0] );
+ $sock = $mcc->get_sock( $args[0] );
+ var_dump( $sock );
+ break;
+
+ case 'server':
+ $res = $mcc->get( $args[0] );
+ print $mcc->_buckets[$mcc->_hashfunc( $args[0] ) % $mcc->_bucketcount] . "\n";
+ break;
+
+ case 'set':
+ $key = array_shift( $args );
+ if ( $args[0] == "#" && is_numeric( $args[1] ) ) {
+ $value = str_repeat( '*', $args[1] );
+ } else {
+ $value = implode( ' ', $args );
+ }
+ if ( !$mcc->set( $key, $value, 0 ) ) {
+ #print 'Error: ' . $mcc->error_string() . "\n";
+ print "MemCached error\n";
+ }
+ break;
+
+ case 'delete':
+ $key = implode( ' ', $args );
+ if ( !$mcc->delete( $key ) ) {
+ #print 'Error: ' . $mcc->error_string() . "\n";
+ print "MemCached error\n";
+ }
+ break;
+
+ case 'history':
+ if ( function_exists( 'readline_list_history' ) ) {
+ foreach( readline_list_history() as $num => $line) {
+ print "$num: $line\n";
+ }
+ } else {
+ print "readline_list_history() not available\n";
+ }
+ break;
+
+ case 'dumpmcc':
+ var_dump( $mcc );
+ break;
+
+ case 'quit':
+ case 'exit':
+ $quit = true;
+ break;
+
+ default:
+ $bad = true;
+ } // switch() end
+
+ if ( $bad ) {
+ if ( $command ) {
+ print "Bad command\n";
+ }
+ } else {
+ if ( function_exists( 'readline_add_history' ) ) {
+ readline_add_history( $line );
+ }
+ }
+} while ( !$quit );
+
+?>
diff --git a/maintenance/mctest.php b/maintenance/mctest.php
new file mode 100644
index 00000000..95249b29
--- /dev/null
+++ b/maintenance/mctest.php
@@ -0,0 +1,59 @@
+<?php
+/* $Id: mctest.php 12896 2006-01-28 08:22:24Z timstarling $ */
+
+$optionsWithArgs = array( 'i' );
+
+require_once('commandLine.inc');
+
+#$wgDebugLogFile = '/dev/stdout';
+
+if ( isset( $args[0] ) ) {
+ $wgMemCachedServers = array( $args[0] );
+} else {
+ $wgMemCachedServers[] = 'localhost';
+}
+if ( isset( $options['i'] ) ) {
+ $iterations = $options['i'];
+} else {
+ $iterations = 100;
+}
+
+foreach ( $wgMemCachedServers as $server ) {
+ print "$server ";
+ $mcc = new MemCachedClientforWiki( array('persistant' => true) );
+ $mcc->set_servers( array( $server ) );
+ $set = 0;
+ $incr = 0;
+ $get = 0;
+ $time_start=microtime_float();
+ for ( $i=1; $i<=$iterations; $i++ ) {
+ if ( !is_null( $mcc->set( "test$i", $i ) ) ) {
+ $set++;
+ }
+ }
+
+ for ( $i=1; $i<=$iterations; $i++ ) {
+ if ( !is_null( $mcc->incr( "test$i", $i ) ) ) {
+ $incr++;
+ }
+ }
+
+ for ( $i=1; $i<=$iterations; $i++ ) {
+ $value = $mcc->get( "test$i" );
+ if ( $value == $i*2 ) {
+ $get++;
+ }
+ }
+ $exectime=microtime_float()-$time_start;
+
+ print "set: $set incr: $incr get: $get time: $exectime\n";
+}
+
+function microtime_float()
+{
+ list($usec, $sec) = explode(" ", microtime());
+ return ((float)$usec + (float)$sec);
+}
+
+
+?>
diff --git a/maintenance/moveBatch.php b/maintenance/moveBatch.php
new file mode 100644
index 00000000..8d7141cd
--- /dev/null
+++ b/maintenance/moveBatch.php
@@ -0,0 +1,85 @@
+<?php
+
+# Move a batch of pages
+# Usage: php moveBatch.php [-u <user>] [-r <reason>] [-i <interval>] <listfile>
+# where
+# <listfile> is a file where each line has two titles separated by a pipe
+# character. The first title is the source, the second is the destination.
+# <user> is the username
+# <reason> is the move reason
+# <interval> is the number of seconds to sleep for after each move
+
+$oldCwd = getcwd();
+$optionsWithArgs = array( 'u', 'r', 'i' );
+require_once( 'commandLine.inc' );
+
+chdir( $oldCwd );
+
+# Options processing
+
+$filename = 'php://stdin';
+$user = 'Move page script';
+$reason = '';
+$interval = 0;
+
+if ( isset( $args[0] ) ) {
+ $filename = $args[0];
+}
+if ( isset( $options['u'] ) ) {
+ $user = $options['u'];
+}
+if ( isset( $options['r'] ) ) {
+ $reason = $options['r'];
+}
+if ( isset( $options['i'] ) ) {
+ $interval = $options['i'];
+}
+
+$wgUser = User::newFromName( $user );
+
+
+# Setup complete, now start
+
+$file = fopen( $filename, 'r' );
+if ( !$file ) {
+ print "Unable to read file, exiting\n";
+ exit;
+}
+
+$dbw =& wfGetDB( DB_MASTER );
+
+for ( $linenum = 1; !feof( $file ); $linenum++ ) {
+ $line = fgets( $file );
+ if ( $line === false ) {
+ break;
+ }
+ $parts = array_map( 'trim', explode( '|', $line ) );
+ if ( count( $parts ) != 2 ) {
+ print "Error on line $linenum, no pipe character\n";
+ continue;
+ }
+ $source = Title::newFromText( $parts[0] );
+ $dest = Title::newFromText( $parts[1] );
+ if ( is_null( $source ) || is_null( $dest ) ) {
+ print "Invalid title on line $linenum\n";
+ continue;
+ }
+
+
+ print $source->getPrefixedText();
+ $dbw->begin();
+ $err = $source->moveTo( $dest, false, $reason );
+ if( $err !== true ) {
+ print "\nFAILED: $err";
+ }
+ $dbw->immediateCommit();
+ print "\n";
+
+ if ( $interval ) {
+ sleep( $interval );
+ }
+ wfWaitForSlaves( 5 );
+}
+
+
+?>
diff --git a/maintenance/mwdocgen.php b/maintenance/mwdocgen.php
new file mode 100644
index 00000000..de1a7d96
--- /dev/null
+++ b/maintenance/mwdocgen.php
@@ -0,0 +1,205 @@
+<?php
+/**
+ * Script to easily generate the mediawiki documentation using doxygen.
+ *
+ * By default it will generate the whole documentation but you will be able to
+ * generate just some parts.
+ *
+ * Usage:
+ * php mwdocgen.php
+ *
+ * Then make a selection from the menu
+ *
+ * KNOWN BUGS:
+ *
+ * - passthru() seems to always use buffering (even with ob_implicit_flush()),
+ *   which makes output slow when doxygen parses language files.
+ * - the menu doesn't work; it was disabled at revision 13740 and still needs
+ *   to be coded.
+ *
+ *
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ *
+ * @author Ashar Voultoiz <thoane@altern.org>
+ * @version first release
+ */
+
+#
+# Variables / Configuration
+#
+
+if( php_sapi_name() != 'cli' ) {
+ echo 'Run me from the command line.';
+ die( -1 );
+}
+
+/** Figure out the base directory for MediaWiki location */
+$mwPath = dirname( dirname( __FILE__ ) ) . DIRECTORY_SEPARATOR;
+
+/** Global variable: temporary directory */
+$tmpPath = '/tmp/';
+
+/** doxygen binary script */
+$doxygenBin = 'doxygen';
+
+/** doxygen configuration template for mediawiki */
+$doxygenTemplate = $mwPath . 'maintenance/Doxyfile';
+
+/** where doxygen should output the generated documentation */
+#$doxyOutput = '/var/www/mwdoc/';
+$doxyOutput = $mwPath . 'docs' . DIRECTORY_SEPARATOR ;
+
+/** MediaWiki subpaths */
+$mwPathI = $mwPath.'includes/';
+$mwPathL = $mwPath.'languages/';
+$mwPathM = $mwPath.'maintenance/';
+$mwPathS = $mwPath.'skins/';
+
+/** Variable to get user input */
+$input = '';
+
+/** shell command that will be run */
+$command = $doxygenBin;
+
+#
+# Functions
+#
+
+function readaline( $prompt = '' ) {
+	print $prompt;
+	$fp = fopen( "php://stdin", "r" );
+	$resp = trim( fgets( $fp, 1024 ) );
+	fclose( $fp );
+	return $resp;
+}
+
+/**
+ * Generate a configuration file given user parameters and return the temporary filename.
+ * @param $doxygenTemplate String: full path for the template.
+ * @param $outputDirectory String: directory where the stuff will be output.
+ * @param $stripFromPath String: path that should be stripped out (usually mediawiki base path).
+ * @param $input String: Path to analyze.
+ */
+function generateConfigFile($doxygenTemplate, $outputDirectory, $stripFromPath, $input) {
+ global $tmpPath ;
+
+ $template = file_get_contents($doxygenTemplate);
+
+ // Replace template placeholders by correct values.
+ $tmpCfg = str_replace(
+ array(
+ '{{OUTPUT_DIRECTORY}}',
+ '{{STRIP_FROM_PATH}}',
+ '{{INPUT}}',
+ ),
+ array(
+ $outputDirectory,
+ $stripFromPath,
+ $input,
+ ),
+ $template
+ );
+ $tmpFileName = $tmpPath . 'mwdocgen'. rand() .'.tmp';
+ file_put_contents( $tmpFileName , $tmpCfg ) or die("Could not write doxygen configuration to file $tmpFileName\n");
+
+ return $tmpFileName;
+}
+
+#
+# Main !
+#
+
+unset( $file );
+
+if( is_array( $argv ) && isset( $argv[1] ) ) {
+ switch( $argv[1] ) {
+ case '--all': $input = 0; break;
+ case '--includes': $input = 1; break;
+ case '--languages': $input = 2; break;
+ case '--maintenance': $input = 3; break;
+ case '--skins': $input = 4; break;
+ case '--file':
+ $input = 5;
+ if( isset( $argv[2] ) ) {
+ $file = $argv[2];
+ }
+ break;
+ }
+}
+
+if( $input === '' ) {
+?>Several documentation possibilities:
+ 0 : whole documentation (1 + 2 + 3 + 4)
+ 1 : only includes
+ 2 : only languages
+ 3 : only maintenance
+ 4 : only skins
+ 5 : only a given file<?php
+ while ( !is_numeric($input) )
+ {
+ $input = readaline( "\nEnter your choice [0]:" );
+ if($input == '') {
+ $input = 0;
+ }
+ }
+}
+/*
+switch ($input) {
+case 0:
+ $command .= " -f $mwBaseFiles -d $mwPathI,$mwPathL,$mwPathM,$mwPathS";
+ break;
+case 1:
+ $command .= "-d $mwPathI";
+ break;
+case 2:
+ $command .= "-d $mwPathL";
+ break;
+case 3:
+ $command .= "-d $mwPathM";
+ break;
+case 4:
+ $command .= "-d $mwPathS";
+ break;
+case 5:
+ if( !isset( $file ) ) {
+ $file = readaline("Enter file name $mwPath");
+ }
+ $command .= ' -f '.$mwPath.$file;
+}
+
+$command .= " -t $pdOutput ".$pdOthers;
+
+*/
+
+// TODO: generate a list of paths
+$input = $mwPath;
+
+$generatedConf = generateConfigFile($doxygenTemplate, $doxyOutput, $mwPath, $input );
+$command = $doxygenBin . ' ' . $generatedConf ;
+
+?>
+---------------------------------------------------
+Launching the command:
+
+<?php echo $command ?>
+
+---------------------------------------------------
+<?php
+
+passthru($command);
+
+?>
+---------------------------------------------------
+Doxygen execution finished.
+Check above for possible errors.
+
+You might want to delete the temporary file <?php echo $generatedConf; ?>
+
+<?php
+
+# phpdoc -d ./mediawiki/includes/ ./mediawiki/maintenance/ -f ./mediawiki/*php -t ./mwdoc/ -dn 'MediaWiki' --title 'MediaWiki generated documentation' -o 'HTML:frames:DOM/earthli'
+
+# phpdoc -f ./mediawiki/includes/GlobalFunctions.php -t ./mwdoc/ -dn 'MediaWiki' --title 'MediaWiki generated documentation' -o 'HTML:frames:DOM/earthli'
+
+?>
diff --git a/maintenance/mwdoxygen.cfg b/maintenance/mwdoxygen.cfg
new file mode 100644
index 00000000..39fae228
--- /dev/null
+++ b/maintenance/mwdoxygen.cfg
@@ -0,0 +1,1136 @@
+# Doxyfile 1.4.3-20050530
+
+#
+# NOTE: this configuration assume you are running doxygen from the
+# mediawiki root directory. For example:
+# ~/dev/mediawiki-HEAD/
+# The easiest way is to get in the maintenance directory and then:
+# make doxydoc
+#
+# Paths visited are configured by the INPUT variable (around line 450)
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for the MediaWiki project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+PROJECT_NAME = MediaWiki
+PROJECT_NUMBER = 1.6-cvs
+OUTPUT_DIRECTORY = docs
+
+# CREATE_SUBDIRS would make a two-level directory tree, 4096 directories in all!
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish,
+# Dutch, Finnish, French, German, Greek, Hungarian, Italian, Japanese,
+# Japanese-en (Japanese with English messages), Korean, Korean-en, Norwegian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Slovak, Slovene, Spanish,
+# Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE = English
+
+USE_WINDOWS_ENCODING = NO
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+ALWAYS_DETAILED_SEC = YES
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names, as on DOS, Mac, or CD-ROM.
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like the Qt-style comments (thus requiring an
+# explicit @brief command for a brief description).
+JAVADOC_AUTOBRIEF = YES
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member
+# documentation.
+DETAILS_AT_TOP = YES
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+INHERIT_DOCS = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+DISTRIBUTE_GROUP_DOC = NO
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+TAB_SIZE = 4
+
+# This tag can be used to specify a number of aliases that act
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+ALIASES =
+
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+OPTIMIZE_OUTPUT_FOR_C = NO
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored
+# for Java.
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+SUBGROUPING = YES
+
+
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface, are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+EXTRACT_LOCAL_METHODS = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+SHOW_INCLUDE_FILES = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+SORT_BY_SCOPE_NAME = NO
+
+# The GENERATE_TODOLIST, GENERATE_TESTLIST, GENERATE_BUGLIST and
+# GENERATE_DEPRECATEDLIST tags can be set to YES (the default) to enable
+# the \todo, \test, \bug and \deprecated lists, or to NO to disable them.
+GENERATE_TODOLIST = YES
+GENERATE_TESTLIST = YES
+GENERATE_BUGLIST = YES
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or define consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and defines in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation.
+SHOW_DIRECTORIES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from the
+# version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+FILE_VERSION_FILTER =
+
+
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+QUIET = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER).
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+# Note: doxygen should be run from the maintenance directory.
+# FIXME: the includes/normal, includes/templates and languages directories are missing.
+INPUT = config includes maintenance skins tests
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx
+# *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm
+
+FILE_PATTERNS = *.php *.inc
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# be excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix filesystem feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+
+EXCLUDE_PATTERNS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain images that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output. If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty, INPUT_FILTER
+# is applied to all files.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default)
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default)
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen's
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or
+# included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet Explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = YES
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate LaTeX output.
+
+GENERATE_LATEX = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate an index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output.
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using Word or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements; missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = NO
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_PREDEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be expanded.
+# The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all function-like macros that are alone
+# on a line, have an all uppercase name, and do not end with a semicolon. Such
+# function macros are typically used for boiler-plate code, and will confuse
+# the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding a location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option is superseded by the HAVE_DOT option below. This is only a
+# fallback. It is recommended to install and use dot, since it yields more
+# powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = NO
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class reference variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct group dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will
+# generate a call dependency graph for every global function or class method.
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+
+CALL_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_WIDTH = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than
+# this value, doxygen will try to truncate the graph, so that it fits within
+# the specified constraint. Beware that most browsers cannot cope with very
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that a graph may be further truncated if the graph's
+# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH
+# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default),
+# the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, which results in a white background.
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE = NO
diff --git a/maintenance/mysql5/tables.sql b/maintenance/mysql5/tables.sql
new file mode 100644
index 00000000..cc6818d3
--- /dev/null
+++ b/maintenance/mysql5/tables.sql
@@ -0,0 +1,1009 @@
+-- Experimental table definitions for MySQL 4.1 and 5.0 with
+-- explicit character set support. Not fully tested, may have
+-- surprises!
+--
+-- TODO: Test various fields
+-- TODO: Anything else need to be moved to VARBINARY and BLOB?
+-- TODO: UCS-2 better than UTF-8?
+-- TODO: Find out how to get 4-byte UTF-8 chars into MySQL...
+-- An alternate UCS-2 that does UTF-16 conversion would work.
+-- TODO: Work on collation usage
+
+-- ------------------------------------------------------------
+
+-- SQL to create the initial tables for the MediaWiki database.
+-- This is read and executed by the install script; you should
+-- not have to run it by itself unless doing a manual install.
+
+--
+-- General notes:
+--
+-- If possible, create tables as InnoDB to benefit from the
+-- superior resiliency against crashes and ability to read
+-- during writes (and write during reads!)
+--
+-- Only the 'searchindex' table requires MyISAM due to the
+-- requirement for fulltext index support, which is missing
+-- from InnoDB.
+--
+--
+-- The MySQL table backend for MediaWiki currently uses
+-- 14-character CHAR or VARCHAR fields to store timestamps.
+-- The format is YYYYMMDDHHMMSS, which is derived from the
+-- text format of MySQL's TIMESTAMP fields (a worked example
+-- follows these general notes).
+--
+-- Historically TIMESTAMP fields were used, but were abandoned
+-- in early 2002 after a lot of trouble with the fields
+-- auto-updating.
+--
+-- The PostgreSQL backend uses DATETIME fields for timestamps,
+-- and we will migrate the MySQL definitions at some point as
+-- well.
+--
+--
+-- The /*$wgDBprefix*/ comments in this and other files are
+-- replaced with the defined table prefix by the installer
+-- and updater scripts. If you are installing or running
+-- updates manually, you will need to insert the table
+-- prefix yourself, if any, when running these scripts
+-- (see the sketch below).
+--
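+-- For illustration only, a sketch of the two conventions above
+-- (the 'mw_' prefix and the timestamp shown are hypothetical
+-- example values; DATE_FORMAT() is a standard MySQL built-in):
+--
+--   SELECT DATE_FORMAT(NOW(), '%Y%m%d%H%i%s');
+--   -- yields e.g. '20060101120000' in the 14-character format
+--
+--   CREATE TABLE mw_user (...);
+--   -- what "CREATE TABLE /*$wgDBprefix*/user" becomes with prefix 'mw_'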
+
+
+--
+-- The user table contains basic account information,
+-- authentication keys, etc.
+--
+-- Some multi-wiki sites may share a single central user table
+-- between separate wikis using the $wgSharedDB setting.
+--
+-- Note that when an external authentication plugin is used,
+-- user table entries still need to be created to store
+-- preferences and to key tracking information in the other
+-- tables.
+--
+CREATE TABLE /*$wgDBprefix*/user (
+ user_id int(5) unsigned NOT NULL auto_increment,
+
+ -- Usernames must be unique, must not be in the form of
+ -- an IP address. _Shouldn't_ allow slashes or case
+ -- conflicts. Spaces are allowed, and are _not_ converted
+ -- to underscores like titles. See User::newFromName() for
+ -- the specific tests that usernames have to pass.
+ user_name varchar(255) binary NOT NULL default '',
+
+ -- Optional 'real name' to be displayed in credit listings
+ user_real_name varchar(255) binary NOT NULL default '',
+
+ -- Password hashes, normally computed like so:
+ -- MD5(CONCAT(user_id,'-',MD5(plaintext_password))); see
+ -- wfEncryptPassword() in GlobalFunctions.php and the
+ -- sketch after this table.
+ user_password tinyblob NOT NULL default '',
+
+ -- When using 'mail me a new password', a random
+ -- password is generated and the hash stored here.
+ -- The previous password is left in place until
+ -- someone actually logs in with the new password,
+ -- at which point the hash is moved to user_password
+ -- and the old password is invalidated.
+ user_newpassword tinyblob NOT NULL default '',
+
+ -- Note: email should be restricted, not public info.
+ -- Same with passwords.
+ user_email tinytext NOT NULL default '',
+
+ -- Newline-separated list of name=value pairs defining the user
+ -- preferences
+ user_options blob NOT NULL default '',
+
+ -- This is a timestamp which is updated when a user
+ -- logs in, logs out, changes preferences, or performs
+ -- some other action requiring HTML cache invalidation
+ -- to ensure that the UI is updated.
+ user_touched char(14) binary NOT NULL default '',
+
+ -- A pseudorandomly generated value that is stored in
+ -- a cookie when the "remember password" feature is
+ -- used (previously, a hash of the password was used, but
+ -- this was vulnerable to cookie-stealing attacks)
+ user_token char(32) binary NOT NULL default '',
+
+ -- Initially NULL; when a user's e-mail address has been
+ -- validated by returning with a mailed token, this is
+ -- set to the current timestamp.
+ user_email_authenticated CHAR(14) BINARY,
+
+ -- Randomly generated token created when the e-mail address
+ -- is set and a confirmation test mail sent.
+ user_email_token CHAR(32) BINARY,
+
+ -- Expiration date for the user_email_token
+ user_email_token_expires CHAR(14) BINARY,
+
+ -- Timestamp of account registration.
+ -- Accounts predating this schema addition may contain NULL.
+ user_registration CHAR(14) BINARY,
+
+ PRIMARY KEY user_id (user_id),
+ UNIQUE INDEX user_name (user_name),
+ INDEX (user_email_token)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
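+-- A sketch of the user_password scheme noted above, for
+-- illustration only ('secret' is a placeholder plaintext and
+-- user_id 1 an example row; MD5() and CONCAT() are MySQL
+-- built-ins):
+--
+--   SELECT MD5(CONCAT(user_id, '-', MD5('secret')))
+--   FROM /*$wgDBprefix*/user WHERE user_id = 1;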
+
+--
+-- User permissions have been broken out to a separate table;
+-- this allows sites with a shared user table to have different
+-- permissions assigned to a user in each project.
+--
+-- This table replaces the old user_rights field which used a
+-- comma-separated blob.
+--
+CREATE TABLE /*$wgDBprefix*/user_groups (
+ -- Key to user_id
+ ug_user int(5) unsigned NOT NULL default '0',
+
+ -- Group names are short symbolic string keys.
+ -- The set of group names is open-ended, though in practice
+ -- only some predefined ones are likely to be used.
+ --
+ -- At runtime $wgGroupPermissions will associate group keys
+ -- with particular permissions. A user will have the combined
+ -- permissions of any group they're explicitly in, plus
+ -- the implicit '*' and 'user' groups (see the example insert
+ -- after this table).
+ ug_group char(16) NOT NULL default '',
+
+ PRIMARY KEY (ug_user,ug_group),
+ KEY (ug_group)
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
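+-- For illustration only: granting the conventional 'sysop' group
+-- to user_id 1 is a plain insert; the group key itself only gains
+-- meaning at runtime via $wgGroupPermissions:
+--
+--   INSERT INTO /*$wgDBprefix*/user_groups (ug_user, ug_group)
+--   VALUES (1, 'sysop');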
+
+-- Stores notifications of user talk page changes, for the display
+-- of the "you have new messages" box
+CREATE TABLE /*$wgDBprefix*/user_newtalk (
+ -- Key to user.user_id
+ user_id int(5) NOT NULL default '0',
+ -- If the user is an anonymous user, their IP address is stored
+ -- here, since a user_id of 0 is ambiguous
+ user_ip varchar(40) NOT NULL default '',
+ INDEX user_id (user_id),
+ INDEX user_ip (user_ip)
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
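+-- A sketch of the "new messages" check this table supports
+-- (example values only): one lookup by user_id for a logged-in
+-- user, or by user_ip for an anonymous one:
+--
+--   SELECT 1 FROM /*$wgDBprefix*/user_newtalk WHERE user_id = 1;
+--   SELECT 1 FROM /*$wgDBprefix*/user_newtalk WHERE user_ip = '127.0.0.1';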
+
+
+--
+-- Core of the wiki: each page has an entry here which identifies
+-- it by title and contains some essential metadata.
+--
+CREATE TABLE /*$wgDBprefix*/page (
+ -- Unique identifier number. The page_id will be preserved across
+ -- edits and rename operations, but not deletions and recreations.
+ page_id int(8) unsigned NOT NULL auto_increment,
+
+ -- A page name is broken into a namespace and a title.
+ -- The namespace keys are UI-language-independent constants,
+ -- defined in includes/Defines.php
+ page_namespace int NOT NULL,
+
+ -- The rest of the title, as text.
+ -- Spaces are transformed into underscores in title storage.
+ page_title varchar(255) binary NOT NULL,
+
+ -- Comma-separated set of permission keys indicating who
+ -- can move or edit the page.
+ page_restrictions tinyblob NOT NULL default '',
+
+ -- Number of times this page has been viewed.
+ page_counter bigint(20) unsigned NOT NULL default '0',
+
+ -- 1 indicates the article is a redirect.
+ page_is_redirect tinyint(1) unsigned NOT NULL default '0',
+
+ -- 1 indicates this is a new entry, with only one edit.
+ -- Not all pages with one edit are new pages.
+ page_is_new tinyint(1) unsigned NOT NULL default '0',
+
+ -- Random value between 0 and 1, used for Special:Randompage
+ page_random real unsigned NOT NULL,
+
+ -- This timestamp is updated whenever the page changes in
+ -- a way requiring it to be re-rendered, invalidating caches.
+ -- Aside from editing this includes permission changes,
+ -- creation or deletion of linked pages, and alteration
+ -- of contained templates.
+ page_touched char(14) binary NOT NULL default '',
+
+ -- Handy key to revision.rev_id of the current revision
+ -- (see the join sketch after the text table below).
+ -- This may be 0 during page creation, but that shouldn't
+ -- happen outside of a transaction... hopefully.
+ page_latest int(8) unsigned NOT NULL,
+
+ -- Uncompressed length in bytes of the page's current source text.
+ page_len int(8) unsigned NOT NULL,
+
+ PRIMARY KEY page_id (page_id),
+ UNIQUE INDEX name_title (page_namespace,page_title),
+
+ -- Special-purpose indexes
+ INDEX (page_random),
+ INDEX (page_len)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+--
+-- Every edit of a page creates also a revision row.
+-- This stores metadata about the revision, and a reference
+-- to the text storage backend.
+--
+CREATE TABLE /*$wgDBprefix*/revision (
+ rev_id int(8) unsigned NOT NULL auto_increment,
+
+ -- Key to page_id. This should _never_ be invalid.
+ rev_page int(8) unsigned NOT NULL,
+
+ -- Key to text.old_id, where the actual bulk text is stored.
+ -- It's possible for multiple revisions to use the same text,
+ -- for instance revisions where only metadata is altered
+ -- or a rollback to a previous version.
+ rev_text_id int(8) unsigned NOT NULL,
+
+ -- Text comment summarizing the change.
+ -- This text is shown in the history and other changes lists,
+ -- rendered in a subset of wiki markup by Linker::formatComment()
+ rev_comment tinyblob NOT NULL default '',
+
+ -- Key to user.user_id of the user who made this edit.
+ -- Stores 0 for anonymous edits and for some mass imports.
+ rev_user int(5) unsigned NOT NULL default '0',
+
+ -- Text username or IP address of the editor.
+ rev_user_text varchar(255) binary NOT NULL default '',
+
+ -- Timestamp
+ rev_timestamp char(14) binary NOT NULL default '',
+
+ -- Records whether the user marked the 'minor edit' checkbox.
+ -- Many automated edits are marked as minor.
+ rev_minor_edit tinyint(1) unsigned NOT NULL default '0',
+
+ -- Not yet used; reserved for future changes to the deletion system.
+ rev_deleted tinyint(1) unsigned NOT NULL default '0',
+
+ PRIMARY KEY rev_page_id (rev_page, rev_id),
+ UNIQUE INDEX rev_id (rev_id),
+ INDEX rev_timestamp (rev_timestamp),
+ INDEX page_timestamp (rev_page,rev_timestamp),
+ INDEX user_timestamp (rev_user,rev_timestamp),
+ INDEX usertext_timestamp (rev_user_text,rev_timestamp)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+
+--
+-- Holds text of individual page revisions.
+--
+-- Field names are a holdover from the 'old' revisions table in
+-- MediaWiki 1.4 and earlier: an upgrade will transform that
+-- table into the 'text' table to minimize unnecessary churning
+-- and downtime. If upgrading, the other fields will be left unused.
+--
+CREATE TABLE /*$wgDBprefix*/text (
+ -- Unique text storage key number.
+ -- Note that the 'oldid' parameter used in URLs does *not*
+ -- refer to this number anymore, but to rev_id.
+ --
+ -- revision.rev_text_id is a key to this column
+ old_id int(8) unsigned NOT NULL auto_increment,
+
+ -- Depending on the contents of the old_flags field, the text
+ -- may be convenient plain text, or it may be funkily encoded.
+ old_text mediumblob NOT NULL default '',
+
+ -- Comma-separated list of flags:
+ -- gzip: text is compressed with PHP's gzdeflate() function.
+ -- utf8: text was stored as UTF-8.
+ -- If $wgLegacyEncoding option is on, rows *without* this flag
+ -- will be converted to UTF-8 transparently at load time.
+ -- object: text field contained a serialized PHP object.
+ -- The object either contains multiple versions compressed
+ -- together to achieve a better compression ratio, or it refers
+ -- to another row where the text can be found.
+ old_flags tinyblob NOT NULL default '',
+
+ PRIMARY KEY old_id (old_id)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
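+-- A sketch (not part of the schema) of how the page, revision and
+-- text tables above chain together to fetch a page's current
+-- wikitext; namespace 0 and 'Main_Page' are example values:
+--
+--   SELECT old_text
+--   FROM /*$wgDBprefix*/page
+--   JOIN /*$wgDBprefix*/revision ON rev_id = page_latest
+--   JOIN /*$wgDBprefix*/text ON old_id = rev_text_id
+--   WHERE page_namespace = 0 AND page_title = 'Main_Page';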
+
+--
+-- Holding area for deleted articles, which may be viewed
+-- or restored by admins through the Special:Undelete interface.
+-- The fields generally correspond to the page, revision, and text
+-- fields, with several caveats.
+--
+CREATE TABLE /*$wgDBprefix*/archive (
+ ar_namespace int NOT NULL default '0',
+ ar_title varchar(255) binary NOT NULL default '',
+
+ -- Newly deleted pages will not store text in this table,
+ -- but will reference the separately existing text rows.
+ -- This field is retained for backwards compatibility,
+ -- so old archived pages will remain accessible after
+ -- upgrading from 1.4 to 1.5.
+ -- Text may be gzipped or otherwise funky.
+ ar_text mediumblob NOT NULL default '',
+
+ -- Basic revision stuff...
+ ar_comment tinyblob NOT NULL default '',
+ ar_user int(5) unsigned NOT NULL default '0',
+ ar_user_text varchar(255) binary NOT NULL,
+ ar_timestamp char(14) binary NOT NULL default '',
+ ar_minor_edit tinyint(1) NOT NULL default '0',
+
+ -- See ar_text note.
+ ar_flags tinyblob NOT NULL default '',
+
+ -- When revisions are deleted, their unique rev_id is stored
+ -- here so it can be retained after undeletion. This is necessary
+ -- to retain permalinks to given revisions after accidental delete
+ -- cycles or messy operations like history merges.
+ --
+ -- Old entries from 1.4 will be NULL here, and a new rev_id will
+ -- be created on undeletion for those revisions.
+ ar_rev_id int(8) unsigned,
+
+ -- For newly deleted revisions, this is the text.old_id key to the
+ -- actual stored text. To avoid breaking the block-compression scheme
+ -- and otherwise making storage changes harder, the actual text is
+ -- *not* deleted from the text table, merely hidden by removal of the
+ -- page and revision entries.
+ --
+ -- Old entries deleted under 1.2-1.4 will have NULL here, and their
+ -- ar_text and ar_flags fields will be used to create a new text
+ -- row upon undeletion.
+ ar_text_id int(8) unsigned,
+
+ KEY name_title_timestamp (ar_namespace,ar_title,ar_timestamp)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
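+-- For illustration only, a sketch of listing the archived
+-- revisions of a deleted page, as a Special:Undelete style view
+-- would (namespace 0 and 'Main_Page' are example values; the
+-- query uses the name_title_timestamp key above):
+--
+--   SELECT ar_timestamp, ar_user_text, ar_comment
+--   FROM /*$wgDBprefix*/archive
+--   WHERE ar_namespace = 0 AND ar_title = 'Main_Page'
+--   ORDER BY ar_timestamp;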
+
+
+--
+-- Track page-to-page hyperlinks within the wiki.
+--
+CREATE TABLE /*$wgDBprefix*/pagelinks (
+ -- Key to the page_id of the page containing the link.
+ pl_from int(8) unsigned NOT NULL default '0',
+
+ -- Key to page_namespace/page_title of the target page.
+ -- The target page may or may not exist, and due to renames
+ -- and deletions may refer to different page records as time
+ -- goes by (see the sketch after this table).
+ pl_namespace int NOT NULL default '0',
+ pl_title varchar(255) binary NOT NULL default '',
+
+ UNIQUE KEY pl_from(pl_from,pl_namespace,pl_title),
+ KEY (pl_namespace,pl_title)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
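+-- Since a link target need not exist (see note above), a
+-- wanted-pages style listing is a LEFT JOIN against page;
+-- a sketch, for illustration only:
+--
+--   SELECT pl_namespace, pl_title
+--   FROM /*$wgDBprefix*/pagelinks
+--   LEFT JOIN /*$wgDBprefix*/page
+--     ON page_namespace = pl_namespace AND page_title = pl_title
+--   WHERE page_id IS NULL;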
+
+
+--
+-- Track template inclusions.
+--
+CREATE TABLE /*$wgDBprefix*/templatelinks (
+ -- Key to the page_id of the page containing the link.
+ tl_from int(8) unsigned NOT NULL default '0',
+
+ -- Key to page_namespace/page_title of the target page.
+ -- The target page may or may not exist, and due to renames
+ -- and deletions may refer to different page records as time
+ -- goes by.
+ tl_namespace int NOT NULL default '0',
+ tl_title varchar(255) binary NOT NULL default '',
+
+ UNIQUE KEY tl_from(tl_from,tl_namespace,tl_title),
+ KEY (tl_namespace,tl_title)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+
+--
+-- Track links to images *used inline*
+-- We don't distinguish live from broken links here, so
+-- they do not need to be changed on upload/removal.
+--
+CREATE TABLE /*$wgDBprefix*/imagelinks (
+ -- Key to page_id of the page containing the image / media link.
+ il_from int(8) unsigned NOT NULL default '0',
+
+ -- Filename of target image.
+ -- This is also the page_title of the file's description page;
+ -- all such pages are in namespace 6 (NS_IMAGE).
+ il_to varchar(255) binary NOT NULL default '',
+
+ UNIQUE KEY il_from(il_from,il_to),
+ KEY (il_to)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+--
+-- Track category inclusions *used inline*.
+-- This tracks a single level of category membership
+-- (folksonomic tagging, really).
+--
+CREATE TABLE /*$wgDBprefix*/categorylinks (
+ -- Key to page_id of the page defined as a category member.
+ cl_from int(8) unsigned NOT NULL default '0',
+
+ -- Name of the category.
+ -- This is also the page_title of the category's description page;
+ -- all such pages are in namespace 14 (NS_CATEGORY).
+ cl_to varchar(255) binary NOT NULL default '',
+
+ -- The title of the linking page, or an optional override
+ -- to determine sort order. Sorting is by binary order, which
+ -- isn't always ideal, but collations seem to be an exciting
+ -- and dangerous new world in MySQL...
+ --
+ -- For MySQL 4.1+ with the charset set to utf8, the sort key *index*
+ -- needs to be cut to fewer than 1024 bytes (at 3 bytes per char).
+ -- To sort properly on the shortened key, this field needs to be
+ -- truncated to the same length.
+ cl_sortkey varchar(86) binary NOT NULL default '',
+
+ -- This isn't really used at present. Provided for an optional
+ -- sorting method by approximate addition time.
+ cl_timestamp timestamp NOT NULL,
+
+ UNIQUE KEY cl_from(cl_from,cl_to),
+
+ -- We always sort within a given category...
+ KEY cl_sortkey(cl_to,cl_sortkey),
+
+ -- Not really used?
+ KEY cl_timestamp(cl_to,cl_timestamp)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
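+
+-- For illustration only: listing the members of a hypothetical
+-- category in sorted order uses the cl_sortkey key above, e.g.
+--   SELECT cl_from FROM /*$wgDBprefix*/categorylinks
+--   WHERE cl_to='Example_category' ORDER BY cl_sortkey;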
+
+--
+-- Track links to external URLs
+--
+CREATE TABLE /*$wgDBprefix*/externallinks (
+ -- page_id of the referring page
+ el_from int(8) unsigned NOT NULL default '0',
+
+ -- The URL
+ el_to blob NOT NULL default '',
+
+ -- In the case of HTTP URLs, this is the URL with any username or password
+ -- removed, and with the labels in the hostname reversed and converted to
+ -- lower case. An extra dot is added to allow for matching of either
+ -- example.com or *.example.com in a single scan.
+ -- Example:
+ -- http://user:password@sub.example.com/page.html
+ -- becomes
+ -- http://com.example.sub./page.html
+ -- which allows for fast searching for all pages under example.com with the
+ -- clause:
+ -- WHERE el_index LIKE 'http://com.example.%'
+ el_index blob NOT NULL default '',
+
+ KEY (el_from, el_to(40)),
+ KEY (el_to(60), el_from),
+ KEY (el_index(60))
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+--
+-- Track interlanguage links
+--
+CREATE TABLE /*$wgDBprefix*/langlinks (
+ -- page_id of the referring page
+ ll_from int(8) unsigned NOT NULL default '0',
+
+ -- Language code of the target
+ ll_lang varchar(10) binary NOT NULL default '',
+
+ -- Title of the target, including namespace
+ ll_title varchar(255) binary NOT NULL default '',
+
+ UNIQUE KEY (ll_from, ll_lang),
+ KEY (ll_lang, ll_title)
+) ENGINE=InnoDB, DEFAULT CHARSET=utf8;
+
+--
+-- Contains a single row with some aggregate info
+-- on the state of the site.
+--
+CREATE TABLE /*$wgDBprefix*/site_stats (
+ -- The single row should contain 1 here.
+ ss_row_id int(8) unsigned NOT NULL,
+
+ -- Total number of page views, if hit counters are enabled.
+ ss_total_views bigint(20) unsigned default '0',
+
+ -- Total number of edits performed.
+ ss_total_edits bigint(20) unsigned default '0',
+
+ -- An approximate count of pages matching the following criteria:
+ -- * in namespace 0
+ -- * not a redirect
+ -- * contains the text '[['
+ -- See Article::isCountable() in includes/Article.php
+ ss_good_articles bigint(20) unsigned default '0',
+
+ -- Total pages, theoretically equal to SELECT COUNT(*) FROM page; except faster
+ ss_total_pages bigint(20) default '-1',
+
+ -- Number of users, theoretically equal to SELECT COUNT(*) FROM user;
+ ss_users bigint(20) default '-1',
+
+ -- Deprecated, no longer updated as of 1.5
+ ss_admins int(10) default '-1',
+
+ -- Number of images, equivalent to SELECT COUNT(*) FROM image
+ ss_images int(10) default '0',
+
+ UNIQUE KEY ss_row_id (ss_row_id)
+
+) TYPE=InnoDB;
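+
+-- For illustration only: the single row is created by the install and
+-- update scripts, roughly along the lines of
+--   INSERT INTO /*$wgDBprefix*/site_stats (ss_row_id) VALUES (1);
+-- with the counters maintained incrementally thereafter.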
+
+--
+-- Stores an ID for every time any article is visited;
+-- depending on $wgHitcounterUpdateFreq, it is
+-- periodically cleared and the page_counter column
+-- in the page table updated for all articles
+-- that have been visited.
+--
+CREATE TABLE /*$wgDBprefix*/hitcounter (
+ hc_id INTEGER UNSIGNED NOT NULL
+) TYPE=HEAP MAX_ROWS=25000;
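+
+-- Illustrative sketch (not executed here) of the periodic flush:
+-- hits are aggregated per page, e.g.
+--   SELECT hc_id, COUNT(*) AS hits FROM /*$wgDBprefix*/hitcounter GROUP BY hc_id;
+-- and folded into page.page_counter before the table is cleared.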
+
+
+--
+-- The internet is full of jerks, alas. Sometimes it's handy
+-- to block a vandal or troll account.
+--
+CREATE TABLE /*$wgDBprefix*/ipblocks (
+ -- Primary key, introduced for privacy.
+ ipb_id int(8) NOT NULL auto_increment,
+
+ -- Blocked IP address in dotted-quad form or user name.
+ ipb_address varchar(40) binary NOT NULL default '',
+
+ -- Blocked user ID or 0 for IP blocks.
+ ipb_user int(8) unsigned NOT NULL default '0',
+
+ -- User ID who made the block.
+ ipb_by int(8) unsigned NOT NULL default '0',
+
+ -- Text comment made by blocker.
+ ipb_reason tinyblob NOT NULL default '',
+
+ -- Creation (or refresh) date in standard YMDHMS form.
+ -- IP blocks expire automatically.
+ ipb_timestamp char(14) binary NOT NULL default '',
+
+ -- Indicates that the IP address was banned because a banned
+ -- user accessed a page through it. If this is 1, ipb_address
+ -- will be hidden, and the block identified by block ID number.
+ ipb_auto tinyint(1) NOT NULL default '0',
+
+ -- Time at which the block will expire.
+ ipb_expiry char(14) binary NOT NULL default '',
+
+ -- Start and end of an address range, in hexadecimal
+ -- Size chosen to allow IPv6
+ ipb_range_start varchar(32) NOT NULL default '',
+ ipb_range_end varchar(32) NOT NULL default '',
+
+ PRIMARY KEY ipb_id (ipb_id),
+ INDEX ipb_address (ipb_address),
+ INDEX ipb_user (ipb_user),
+ INDEX ipb_range (ipb_range_start(8), ipb_range_end(8))
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
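+
+-- For illustration only: assuming the usual sprintf('%08X', ip2long(...))
+-- encoding, an IPv4 range block on 1.2.3.0/24 would be stored with
+-- ipb_range_start='01020300' and ipb_range_end='010203FF'.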
+
+
+--
+-- Uploaded images and other files.
+--
+CREATE TABLE /*$wgDBprefix*/image (
+ -- Filename.
+ -- This is also the title of the associated description page,
+ -- which will be in namespace 6 (NS_IMAGE).
+ img_name varchar(255) binary NOT NULL default '',
+
+ -- File size in bytes.
+ img_size int(8) unsigned NOT NULL default '0',
+
+ -- For images, size in pixels.
+ img_width int(5) NOT NULL default '0',
+ img_height int(5) NOT NULL default '0',
+
+ -- Extracted EXIF metadata stored as a serialized PHP array.
+ img_metadata mediumblob NOT NULL,
+
+ -- For images, bits per pixel if known.
+ img_bits int(3) NOT NULL default '0',
+
+ -- Media type as defined by the MEDIATYPE_xxx constants
+ img_media_type ENUM("UNKNOWN", "BITMAP", "DRAWING", "AUDIO", "VIDEO", "MULTIMEDIA", "OFFICE", "TEXT", "EXECUTABLE", "ARCHIVE") default NULL,
+
+ -- major part of a MIME media type as defined by IANA
+ -- see http://www.iana.org/assignments/media-types/
+ img_major_mime ENUM("unknown", "application", "audio", "image", "text", "video", "message", "model", "multipart") NOT NULL default "unknown",
+
+ -- minor part of a MIME media type as defined by IANA
+ -- the minor parts are not required to adhere to any standard
+ -- but should be consistent throughout the database
+ -- see http://www.iana.org/assignments/media-types/
+ img_minor_mime varchar(32) NOT NULL default "unknown",
+
+ -- Description field as entered by the uploader.
+ -- This is displayed in image upload history and logs.
+ img_description tinyblob NOT NULL default '',
+
+ -- user_id and user_name of uploader.
+ img_user int(5) unsigned NOT NULL default '0',
+ img_user_text varchar(255) binary NOT NULL default '',
+
+ -- Time of the upload.
+ img_timestamp char(14) binary NOT NULL default '',
+
+ PRIMARY KEY img_name (img_name),
+
+ -- Used by Special:Imagelist for sort-by-size
+ INDEX img_size (img_size),
+
+ -- Used by Special:Newimages and Special:Imagelist
+ INDEX img_timestamp (img_timestamp)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+--
+-- Previous revisions of uploaded files.
+-- Awkwardly, image rows have to be moved into
+-- this table at re-upload time.
+--
+CREATE TABLE /*$wgDBprefix*/oldimage (
+ -- Base filename: key to image.img_name
+ oi_name varchar(255) binary NOT NULL default '',
+
+ -- Filename of the archived file.
+ -- This is generally a timestamp and '!' prepended to the base name.
+ oi_archive_name varchar(255) binary NOT NULL default '',
+
+ -- Other fields as in image...
+ oi_size int(8) unsigned NOT NULL default 0,
+ oi_width int(5) NOT NULL default 0,
+ oi_height int(5) NOT NULL default 0,
+ oi_bits int(3) NOT NULL default 0,
+ oi_description tinyblob NOT NULL default '',
+ oi_user int(5) unsigned NOT NULL default '0',
+ oi_user_text varchar(255) binary NOT NULL default '',
+ oi_timestamp char(14) binary NOT NULL default '',
+
+ INDEX oi_name (oi_name(10))
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+
+--
+-- Record of deleted file data
+--
+CREATE TABLE /*$wgDBprefix*/filearchive (
+ -- Unique row id
+ fa_id int not null auto_increment,
+
+ -- Original base filename; key to image.img_name, page.page_title, etc
+ fa_name varchar(255) binary NOT NULL default '',
+
+ -- Filename of archived file, if an old revision
+ fa_archive_name varchar(255) binary default '',
+
+ -- Which storage bin (directory tree or object store) the file data
+ -- is stored in. Should be 'deleted' for files that have been deleted;
+ -- any other bin is not yet in use.
+ fa_storage_group varchar(16),
+
+ -- SHA-1 of the file contents plus extension, used as a key for storage.
+ -- e.g. 8f8a562add37052a1848ff7771a2c515db94baa9.jpg
+ --
+ -- If NULL, the file was missing at deletion time or has been purged
+ -- from the archival storage.
+ fa_storage_key varchar(64) binary default '',
+
+ -- Deletion information, if this file is deleted.
+ fa_deleted_user int,
+ fa_deleted_timestamp char(14) binary default '',
+ fa_deleted_reason text,
+
+ -- Duped fields from image
+ fa_size int(8) unsigned default '0',
+ fa_width int(5) default '0',
+ fa_height int(5) default '0',
+ fa_metadata mediumblob,
+ fa_bits int(3) default '0',
+ fa_media_type ENUM("UNKNOWN", "BITMAP", "DRAWING", "AUDIO", "VIDEO", "MULTIMEDIA", "OFFICE", "TEXT", "EXECUTABLE", "ARCHIVE") default NULL,
+ fa_major_mime ENUM("unknown", "application", "audio", "image", "text", "video", "message", "model", "multipart") default "unknown",
+ fa_minor_mime varchar(32) default "unknown",
+ fa_description tinyblob default '',
+ fa_user int(5) unsigned default '0',
+ fa_user_text varchar(255) binary default '',
+ fa_timestamp char(14) binary default '',
+
+ PRIMARY KEY (fa_id),
+ INDEX (fa_name, fa_timestamp), -- pick out by image name
+ INDEX (fa_storage_group, fa_storage_key), -- pick out dupe files
+ INDEX (fa_deleted_timestamp), -- sort by deletion time
+ INDEX (fa_deleted_user) -- sort by deleter
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+--
+-- Primarily a summary table for Special:Recentchanges,
+-- this table contains some additional info on edits from
+-- the last few days, see Article::editUpdates()
+--
+CREATE TABLE /*$wgDBprefix*/recentchanges (
+ rc_id int(8) NOT NULL auto_increment,
+ rc_timestamp varchar(14) binary NOT NULL default '',
+ rc_cur_time varchar(14) binary NOT NULL default '',
+
+ -- As in revision
+ rc_user int(10) unsigned NOT NULL default '0',
+ rc_user_text varchar(255) binary NOT NULL default '',
+
+ -- When pages are renamed, their RC entries do _not_ change.
+ rc_namespace int NOT NULL default '0',
+ rc_title varchar(255) binary NOT NULL default '',
+
+ -- as in revision...
+ rc_comment varchar(255) binary NOT NULL default '',
+ rc_minor tinyint(3) unsigned NOT NULL default '0',
+
+ -- Edits by user accounts with the 'bot' rights key are
+ -- marked with a 1 here, and will be hidden from the
+ -- default view.
+ rc_bot tinyint(3) unsigned NOT NULL default '0',
+
+ rc_new tinyint(3) unsigned NOT NULL default '0',
+
+ -- Key to page_id (was cur_id prior to 1.5).
+ -- This will keep links working after moves while
+ -- retaining the at-the-time name in the changes list.
+ rc_cur_id int(10) unsigned NOT NULL default '0',
+
+ -- rev_id of the given revision
+ rc_this_oldid int(10) unsigned NOT NULL default '0',
+
+ -- rev_id of the prior revision, for generating diff links.
+ rc_last_oldid int(10) unsigned NOT NULL default '0',
+
+ -- These may no longer be used, with the new move log.
+ rc_type tinyint(3) unsigned NOT NULL default '0',
+ rc_moved_to_ns tinyint(3) unsigned NOT NULL default '0',
+ rc_moved_to_title varchar(255) binary NOT NULL default '',
+
+ -- If the Recent Changes Patrol option is enabled,
+ -- users may mark edits as having been reviewed to
+ -- remove a warning flag on the RC list.
+ -- A value of 1 indicates the page has been reviewed.
+ rc_patrolled tinyint(3) unsigned NOT NULL default '0',
+
+ -- Recorded IP address the edit was made from, if the
+ -- $wgPutIPinRC option is enabled.
+ rc_ip char(15) NOT NULL default '',
+
+ PRIMARY KEY rc_id (rc_id),
+ INDEX rc_timestamp (rc_timestamp),
+ INDEX rc_namespace_title (rc_namespace, rc_title),
+ INDEX rc_cur_id (rc_cur_id),
+ INDEX new_name_timestamp(rc_new,rc_namespace,rc_timestamp),
+ INDEX rc_ip (rc_ip)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
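+
+-- For illustration only: the default changes list is roughly
+--   SELECT * FROM /*$wgDBprefix*/recentchanges
+--   WHERE rc_bot=0 ORDER BY rc_timestamp DESC LIMIT 50;
+-- served by the rc_timestamp index above.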
+
+CREATE TABLE /*$wgDBprefix*/watchlist (
+ -- Key to user.user_id
+ wl_user int(5) unsigned NOT NULL,
+
+ -- Key to page_namespace/page_title
+ -- Note that users may watch pages which do not exist yet,
+ -- or existed in the past but have been deleted.
+ wl_namespace int NOT NULL default '0',
+ wl_title varchar(255) binary NOT NULL default '',
+
+ -- Timestamp when user was last sent a notification e-mail;
+ -- cleared when the user visits the page.
+ wl_notificationtimestamp varchar(14) binary,
+
+ UNIQUE KEY (wl_user, wl_namespace, wl_title),
+ KEY namespace_title (wl_namespace,wl_title)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
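+
+-- For illustration only: finding the watchers of a hypothetical page
+-- uses the namespace_title key, e.g.
+--   SELECT wl_user FROM /*$wgDBprefix*/watchlist
+--   WHERE wl_namespace=0 AND wl_title='Some_page';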
+
+
+--
+-- Used by the math module to keep track
+-- of previously-rendered items.
+--
+CREATE TABLE /*$wgDBprefix*/math (
+ -- Binary MD5 hash of the latex fragment, used as an identifier key.
+ math_inputhash varbinary(16) NOT NULL,
+
+ -- Not sure what this is, exactly...
+ math_outputhash varbinary(16) NOT NULL,
+
+ -- texvc reports how well it thinks the HTML conversion worked;
+ -- if it's a low level the PNG rendering may be preferred.
+ math_html_conservativeness tinyint(1) NOT NULL,
+
+ -- HTML output from texvc, if any
+ math_html text,
+
+ -- MathML output from texvc, if any
+ math_mathml text,
+
+ UNIQUE KEY math_inputhash (math_inputhash)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+--
+-- When using the default MySQL search backend, page titles
+-- and text are munged to strip markup, do Unicode case folding,
+-- and prepare the result for MySQL's fulltext index.
+--
+-- This table must be MyISAM; InnoDB does not support the needed
+-- fulltext index.
+--
+CREATE TABLE /*$wgDBprefix*/searchindex (
+ -- Key to page_id
+ si_page int(8) unsigned NOT NULL,
+
+ -- Munged version of title
+ si_title varchar(255) NOT NULL default '',
+
+ -- Munged version of body text
+ si_text mediumtext NOT NULL default '',
+
+ UNIQUE KEY (si_page),
+ FULLTEXT si_title (si_title),
+ FULLTEXT si_text (si_text)
+
+) TYPE=MyISAM, DEFAULT CHARSET=utf8;
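+
+-- For illustration only: the MySQL search backend queries these
+-- fulltext indexes along the lines of
+--   SELECT si_page FROM /*$wgDBprefix*/searchindex
+--   WHERE MATCH(si_text) AGAINST('some folded terms' IN BOOLEAN MODE);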
+
+--
+-- Recognized interwiki link prefixes
+--
+CREATE TABLE /*$wgDBprefix*/interwiki (
+ -- The interwiki prefix (e.g. "Meatball", or the language prefix "de")
+ iw_prefix char(32) NOT NULL,
+
+ -- The URL of the wiki, with "$1" as a placeholder for an article name.
+ -- Any spaces in the name will be transformed to underscores before
+ -- insertion.
+ iw_url char(127) NOT NULL,
+
+ -- A boolean value indicating whether the wiki is in this project
+ -- (used, for example, to detect redirect loops)
+ iw_local BOOL NOT NULL,
+
+ -- Boolean value indicating whether interwiki transclusions are allowed.
+ iw_trans TINYINT(1) NOT NULL DEFAULT 0,
+
+ UNIQUE KEY iw_prefix (iw_prefix)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
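+
+-- For illustration only, with a made-up prefix:
+--   INSERT INTO /*$wgDBprefix*/interwiki (iw_prefix,iw_url,iw_local,iw_trans)
+--   VALUES ('example','http://example.org/wiki/$1',0,0);
+-- makes [[example:Some page]] point at http://example.org/wiki/Some_page.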
+
+--
+-- Used for caching expensive grouped queries
+--
+CREATE TABLE /*$wgDBprefix*/querycache (
+ -- A key name, generally the base name of the special page.
+ qc_type char(32) NOT NULL,
+
+ -- Some sort of stored value. Sizes, counts...
+ qc_value int(5) unsigned NOT NULL default '0',
+
+ -- Target namespace+title
+ qc_namespace int NOT NULL default '0',
+ qc_title char(255) binary NOT NULL default '',
+
+ KEY (qc_type,qc_value)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
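+
+-- For illustration only: a cached special-page result might look like
+-- qc_type='Wantedpages', qc_value=<link count>, qc_namespace/qc_title
+-- naming the target page; such rows are rebuilt periodically rather
+-- than on every view.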
+
+--
+-- For a few generic cache operations if not using Memcached
+--
+CREATE TABLE /*$wgDBprefix*/objectcache (
+ keyname char(255) binary not null default '',
+ value mediumblob,
+ exptime datetime,
+ unique key (keyname),
+ key (exptime)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
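+
+-- For illustration only: expired entries can be purged with
+--   DELETE FROM /*$wgDBprefix*/objectcache WHERE exptime < NOW();
+-- which is what the exptime key above is for.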
+
+--
+-- Cache of interwiki transclusion
+--
+CREATE TABLE /*$wgDBprefix*/transcache (
+ tc_url VARCHAR(255) NOT NULL,
+ tc_contents TEXT,
+ tc_time INT NOT NULL,
+ UNIQUE INDEX tc_url_idx(tc_url)
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+CREATE TABLE /*$wgDBprefix*/logging (
+ -- Symbolic keys for the general log type and the action type
+ -- within the log. The output format will be controlled by the
+ -- action field, but only the type controls categorization.
+ log_type char(10) NOT NULL default '',
+ log_action char(10) NOT NULL default '',
+
+ -- Timestamp. Duh.
+ log_timestamp char(14) NOT NULL default '19700101000000',
+
+ -- The user who performed this action; key to user_id
+ log_user int unsigned NOT NULL default 0,
+
+ -- Key to the page affected. Where a user is the target,
+ -- this will point to the user page.
+ log_namespace int NOT NULL default 0,
+ log_title varchar(255) binary NOT NULL default '',
+
+ -- Freeform text. Interpreted as edit history comments.
+ log_comment varchar(255) NOT NULL default '',
+
+ -- LF separated list of miscellaneous parameters
+ log_params blob NOT NULL default '',
+
+ KEY type_time (log_type, log_timestamp),
+ KEY user_time (log_user, log_timestamp),
+ KEY page_time (log_namespace, log_title, log_timestamp)
+
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
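+
+-- For illustration only: a per-user log extract (e.g. for Special:Log)
+-- is served by the user_time key:
+--   SELECT * FROM /*$wgDBprefix*/logging
+--   WHERE log_user=123 ORDER BY log_timestamp DESC;
+-- (123 standing in for a real user_id).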
+
+CREATE TABLE /*$wgDBprefix*/trackbacks (
+ tb_id integer AUTO_INCREMENT PRIMARY KEY,
+ tb_page integer REFERENCES page(page_id) ON DELETE CASCADE,
+ tb_title varchar(255) NOT NULL,
+ tb_url varchar(255) NOT NULL,
+ tb_ex text,
+ tb_name varchar(255),
+
+ INDEX (tb_page)
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
+
+-- Jobs performed by parallel apache threads or a command-line daemon
+CREATE TABLE /*$wgDBprefix*/job (
+ job_id int(9) unsigned NOT NULL auto_increment,
+
+ -- Command name, currently only refreshLinks is defined
+ job_cmd varchar(255) NOT NULL default '',
+
+ -- Namespace and title to act on
+ -- Should be 0 and '' if the command does not operate on a title
+ job_namespace int NOT NULL,
+ job_title varchar(255) binary NOT NULL,
+
+ -- Any other parameters to the command
+ -- Presently unused, format undefined
+ job_params blob NOT NULL default '',
+
+ PRIMARY KEY job_id (job_id),
+ KEY (job_cmd, job_namespace, job_title)
+) TYPE=InnoDB, DEFAULT CHARSET=utf8;
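+
+-- For illustration only: a queued refreshLinks job for a hypothetical
+-- page would look like
+--   INSERT INTO /*$wgDBprefix*/job (job_cmd,job_namespace,job_title,job_params)
+--   VALUES ('refreshLinks',0,'Some_page','');
+-- to be picked up later by a web request or the runJobs.php script.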
+
+-- Details of updates to cached special pages
+CREATE TABLE /*$wgDBprefix*/querycache_info (
+
+ -- Special page name
+ -- Corresponds to a qc_type value
+ qci_type varchar(32) NOT NULL default '',
+
+ -- Timestamp of last update
+ qci_timestamp char(14) NOT NULL default '19700101000000',
+
+ UNIQUE KEY ( qci_type )
+
+) TYPE=InnoDB; \ No newline at end of file
diff --git a/maintenance/namespace2sql.php b/maintenance/namespace2sql.php
new file mode 100644
index 00000000..8084bfec
--- /dev/null
+++ b/maintenance/namespace2sql.php
@@ -0,0 +1,14 @@
+<?php
+#
+# Print SQL to insert namespace names into database.
+# This source code is in the public domain.
+
+require_once( "commandLine.inc" );
+
+for ($i = -2; $i < 16; ++$i) {
+ $nsname = wfStrencode( $wgLang->getNsText( $i ) );
+ $dbname = wfStrencode( $wgDBname );
+ print "INSERT INTO ns_name(ns_db, ns_num, ns_name) VALUES('$dbname', $i, '$nsname');\n";
+}
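+
+# Example output for a hypothetical database named "wikidb" with
+# English namespace names:
+#   INSERT INTO ns_name(ns_db, ns_num, ns_name) VALUES('wikidb', 10, 'Template');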
+
+?>
diff --git a/maintenance/namespaceDupes.php b/maintenance/namespaceDupes.php
new file mode 100644
index 00000000..ad56eee7
--- /dev/null
+++ b/maintenance/namespaceDupes.php
@@ -0,0 +1,194 @@
+<?php
+# Copyright (C) 2005 Brion Vibber <brion@pobox.com>
+# http://www.mediawiki.org/
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# http://www.gnu.org/copyleft/gpl.html
+
+$options = array( 'fix', 'suffix', 'help' );
+
+/** */
+require_once( 'commandLine.inc' );
+#require_once( 'maintenance/userDupes.inc' );
+
+if(isset( $options['help'] ) ) {
+print <<<END
+usage: namespaceDupes.php [--fix] [--suffix=<text>] [--help]
+ --help : this help message
+ --fix : attempt to automatically fix errors
+ --suffix=<text> : dupes will be renamed into the correct namespace, with <text>
+ appended after the article name.
+
+END;
+die;
+}
+
+class NamespaceConflictChecker {
+ function NamespaceConflictChecker( &$db ) {
+ $this->db =& $db;
+ }
+
+ function checkAll( $fix, $suffix = '' ) {
+ global $wgContLang;
+ $spaces = $wgContLang->getNamespaces();
+ $ok = true;
+ foreach( $spaces as $ns => $name ) {
+ $ok = $this->checkNamespace( $ns, $name, $fix, $suffix ) && $ok;
+ }
+ return $ok;
+ }
+
+ function checkNamespace( $ns, $name, $fix, $suffix = '' ) {
+ echo "Checking namespace $ns: \"$name\"\n";
+ if( $name == '' ) {
+ echo "... skipping article namespace\n";
+ return true;
+ }
+
+ $conflicts = $this->getConflicts( $ns, $name );
+ $count = count( $conflicts );
+ if( $count == 0 ) {
+ echo "... no conflicts detected!\n";
+ return true;
+ }
+
+ echo "... $count conflicts detected:\n";
+ $ok = true;
+ foreach( $conflicts as $row ) {
+ $resolvable = $this->reportConflict( $row, $suffix );
+ $ok = $ok && $resolvable;
+ if( $fix && ( $resolvable || $suffix != '' ) ) {
+ $ok = $this->resolveConflict( $row, $resolvable, $suffix ) && $ok;
+ }
+ }
+ return $ok;
+ }
+
+ /**
+ * @fixme: do this for reals
+ */
+ function checkPrefix( $key, $prefix, $fix, $suffix = '' ) {
+ echo "Checking prefix \"$prefix\" vs namespace $key\n";
+ return $this->checkNamespace( $key, $prefix, $fix, $suffix );
+ }
+
+ function getConflicts( $ns, $name ) {
+ $page = $this->newSchema() ? 'page' : 'cur';
+ $table = $this->db->tableName( $page );
+
+ $prefix = $this->db->strencode( $name );
+ $likeprefix = str_replace( '_', '\\_', $prefix);
+
+ $sql = "SELECT {$page}_id AS id,
+ {$page}_title AS oldtitle,
+ $ns AS namespace,
+ TRIM(LEADING '$prefix:' FROM {$page}_title) AS title
+ FROM {$table}
+ WHERE {$page}_namespace=0
+ AND {$page}_title LIKE '$likeprefix:%'";
+
+ $result = $this->db->query( $sql, 'NamespaceConflictChecker::getConflicts' );
+
+ $set = array();
+ while( $row = $this->db->fetchObject( $result ) ) {
+ $set[] = $row;
+ }
+ $this->db->freeResult( $result );
+
+ return $set;
+ }
+
+ function reportConflict( $row, $suffix ) {
+ $newTitle = Title::makeTitle( $row->namespace, $row->title );
+ printf( "... %d (0,\"%s\") -> (%d,\"%s\") [[%s]]\n",
+ $row->id,
+ $row->oldtitle,
+ $row->namespace,
+ $row->title,
+ $newTitle->getPrefixedText() );
+
+ $id = $newTitle->getArticleId();
+ if( $id ) {
+ echo "... *** cannot resolve automatically; page exists with ID $id ***\n";
+ return false;
+ } else {
+ return true;
+ }
+ }
+
+ function resolveConflict( $row, $resolvable, $suffix ) {
+ if( !$resolvable ) {
+ $row->title .= $suffix;
+ $title = Title::makeTitle( $row->namespace, $row->title );
+ echo "... *** using suffixed form [[" . $title->getPrefixedText() . "]] ***\n";
+ }
+ $tables = $this->newSchema()
+ ? array( 'page' )
+ : array( 'cur', 'old' );
+ foreach( $tables as $table ) {
+ $this->resolveConflictOn( $row, $table );
+ }
+ return true;
+ }
+
+ function resolveConflictOn( $row, $table ) {
+ $fname = 'NamespaceConflictChecker::resolveConflictOn';
+ echo "... resolving on $table... ";
+ $this->db->update( $table,
+ array(
+ "{$table}_namespace" => $row->namespace,
+ "{$table}_title" => $row->title,
+ ),
+ array(
+ "{$table}_namespace" => 0,
+ "{$table}_title" => $row->oldtitle,
+ ),
+ $fname );
+ echo "ok.\n";
+ return true;
+ }
+
+ function newSchema() {
+ return class_exists( 'Revision' );
+ }
+}
+
+
+
+
+$wgTitle = Title::newFromText( 'Namespace title conflict cleanup script' );
+
+$fix = isset( $options['fix'] );
+$suffix = isset( $options['suffix'] ) ? $options['suffix'] : '';
+$prefix = isset( $options['prefix'] ) ? $options['prefix'] : '';
+$key = isset( $options['key'] ) ? intval( $options['key'] ) : 0;
+$dbw =& wfGetDB( DB_MASTER );
+$duper = new NamespaceConflictChecker( $dbw );
+
+if( $prefix ) {
+ $retval = $duper->checkPrefix( $key, $prefix, $fix, $suffix );
+} else {
+ $retval = $duper->checkAll( $fix, $suffix );
+}
+
+if( $retval ) {
+ echo "\nLooks good!\n";
+ exit( 0 );
+} else {
+ echo "\nOh noeees\n";
+ exit( -1 );
+}
+
+?>
diff --git a/maintenance/nukePage.inc b/maintenance/nukePage.inc
new file mode 100644
index 00000000..921faba6
--- /dev/null
+++ b/maintenance/nukePage.inc
@@ -0,0 +1,80 @@
+<?php
+
+/**
+ * Support functions for the nukeArticle script
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ */
+
+require_once( 'purgeOldText.inc' );
+
+function NukePage( $name, $delete = false ) {
+
+ $dbw =& wfGetDB( DB_MASTER );
+ $dbw->begin();
+
+ $tbl_pag = $dbw->tableName( 'page' );
+ $tbl_rec = $dbw->tableName( 'recentchanges' );
+ $tbl_rev = $dbw->tableName( 'revision' );
+
+ # Get page ID
+ echo( "Searching for \"$name\"..." );
+ $title = Title::newFromText( $name );
+ if( $title ) {
+ $id = $title->getArticleID();
+ $real = $title->getPrefixedText();
+ echo( "found \"$real\" with ID $id.\n" );
+
+ # Get corresponding revisions
+ echo( "Searching for revisions..." );
+ $res = $dbw->query( "SELECT rev_id FROM $tbl_rev WHERE rev_page = $id" );
+ while( $row = $dbw->fetchObject( $res ) ) {
+ $revs[] = $row->rev_id;
+ }
+ $count = count( $revs );
+ echo( "found $count.\n" );
+
+ # Delete the page record and associated recent changes entries
+ if( $delete ) {
+ echo( "Deleting page record..." );
+ $dbw->query( "DELETE FROM $tbl_pag WHERE page_id = $id" );
+ echo( "done.\n" );
+ echo( "Cleaning up recent changes..." );
+ $dbw->query( "DELETE FROM $tbl_rec WHERE rc_cur_id = $id" );
+ echo( "done.\n" );
+ }
+
+ $dbw->commit();
+
+ # Delete revisions as appropriate
+ if( $delete && $count ) {
+ echo( "Deleting revisions..." );
+ DeleteRevisions( $revs );
+ echo( "done.\n" );
+ PurgeRedundantText( true );
+ }
+
+ } else {
+ echo( "not found in database.\n" );
+ $dbw->commit();
+ }
+
+}
+
+function DeleteRevisions( $ids ) {
+
+ $dbw =& wfGetDB( DB_MASTER );
+ $dbw->begin();
+
+ $tbl_rev = $dbw->tableName( 'revision' );
+
+ $set = implode( ', ', $ids );
+ $dbw->query( "DELETE FROM $tbl_rev WHERE rev_id IN ( $set )" );
+
+ $dbw->commit();
+
+}
+
+?> \ No newline at end of file
diff --git a/maintenance/nukePage.php b/maintenance/nukePage.php
new file mode 100644
index 00000000..b5c3f283
--- /dev/null
+++ b/maintenance/nukePage.php
@@ -0,0 +1,30 @@
+<?php
+
+/**
+ * Erase a page record from the database
+ * Irreversible (can't use standard undelete) and does not update link tables
+ *
+ * @package MediaWiki
+ * @subpackage Maintenance
+ * @author Rob Church <robchur@gmail.com>
+ */
+
+require_once( 'commandLine.inc' );
+require_once( 'nukePage.inc' );
+
+echo( "Erase Page Record\n\n" );
+
+if( isset( $args[0] ) ) {
+ NukePage( $args[0], true );
+} else {
+ ShowUsage();
+}
+
+/** Show script usage information */
+function ShowUsage() {
+ echo( "Remove a page record from the database.\n\n" );
+ echo( "Usage: php nukePage.php <title>\n\n" );
+ echo( " <title> : Page title; spaces escaped with underscores\n\n" );
+}
+
+?> \ No newline at end of file
diff --git a/maintenance/oracle/archives/patch-trackbacks.sql b/maintenance/oracle/archives/patch-trackbacks.sql
new file mode 100644
index 00000000..15d4eef1
--- /dev/null
+++ b/maintenance/oracle/archives/patch-trackbacks.sql
@@ -0,0 +1,10 @@
+CREATE SEQUENCE trackbacks_id_seq;
+CREATE TABLE trackbacks (
+ tb_id NUMBER PRIMARY KEY,
+ tb_page NUMBER(8) REFERENCES page(page_id) ON DELETE CASCADE,
+ tb_title VARCHAR(255) NOT NULL,
+ tb_url VARCHAR(255) NOT NULL,
+ tb_ex CLOB,
+ tb_name VARCHAR(255)
+);
+CREATE INDEX tb_name_page_idx ON trackbacks(tb_page);
diff --git a/maintenance/oracle/archives/patch-transcache.sql b/maintenance/oracle/archives/patch-transcache.sql
new file mode 100644
index 00000000..62ad2c7d
--- /dev/null
+++ b/maintenance/oracle/archives/patch-transcache.sql
@@ -0,0 +1,5 @@
+CREATE TABLE transcache (
+ tc_url VARCHAR2(255) NOT NULL UNIQUE,
+ tc_contents CLOB,
+ tc_time TIMESTAMP NOT NULL
+);
diff --git a/maintenance/oracle/interwiki.sql b/maintenance/oracle/interwiki.sql
new file mode 100644
index 00000000..09d01c64
--- /dev/null
+++ b/maintenance/oracle/interwiki.sql
@@ -0,0 +1,178 @@
+-- Based more or less on the public interwiki map from MeatballWiki
+-- Default interwiki prefixes...
+
+CALL add_interwiki('abbenormal','http://www.ourpla.net/cgi-bin/pikie.cgi?$1',0);
+CALL add_interwiki('acadwiki','http://xarch.tu-graz.ac.at/autocad/wiki/$1',0);
+CALL add_interwiki('acronym','http://www.acronymfinder.com/af-query.asp?String=exact&Acronym=$1',0);
+CALL add_interwiki('advogato','http://www.advogato.org/$1',0);
+CALL add_interwiki('aiwiki','http://www.ifi.unizh.ch/ailab/aiwiki/aiw.cgi?$1',0);
+CALL add_interwiki('alife','http://news.alife.org/wiki/index.php?$1',0);
+CALL add_interwiki('annotation','http://bayle.stanford.edu/crit/nph-med.cgi/$1',0);
+CALL add_interwiki('annotationwiki','http://www.seedwiki.com/page.cfm?wikiid=368&doc=$1',0);
+CALL add_interwiki('arxiv','http://www.arxiv.org/abs/$1',0);
+CALL add_interwiki('aspienetwiki','http://aspie.mela.de/Wiki/index.php?title=$1',0);
+CALL add_interwiki('bemi','http://bemi.free.fr/vikio/index.php?$1',0);
+CALL add_interwiki('benefitswiki','http://www.benefitslink.com/cgi-bin/wiki.cgi?$1',0);
+CALL add_interwiki('brasilwiki','http://rio.ifi.unizh.ch/brasilienwiki/index.php/$1',0);
+CALL add_interwiki('bridgeswiki','http://c2.com/w2/bridges/$1',0);
+CALL add_interwiki('c2find','http://c2.com/cgi/wiki?FindPage&value=$1',0);
+CALL add_interwiki('cache','http://www.google.com/search?q=cache:$1',0);
+CALL add_interwiki('ciscavate','http://ciscavate.org/index.php/$1',0);
+CALL add_interwiki('cliki','http://ww.telent.net/cliki/$1',0);
+CALL add_interwiki('cmwiki','http://www.ourpla.net/cgi-bin/wiki.pl?$1',0);
+CALL add_interwiki('codersbase','http://www.codersbase.com/$1',0);
+CALL add_interwiki('commons','http://commons.wikimedia.org/wiki/$1',0);
+CALL add_interwiki('consciousness','http://teadvus.inspiral.org/',0);
+CALL add_interwiki('corpknowpedia','http://corpknowpedia.org/wiki/index.php/$1',0);
+CALL add_interwiki('creationmatters','http://www.ourpla.net/cgi-bin/wiki.pl?$1',0);
+CALL add_interwiki('dejanews','http://www.deja.com/=dnc/getdoc.xp?AN=$1',0);
+CALL add_interwiki('demokraatia','http://wiki.demokraatia.ee/',0);
+CALL add_interwiki('dictionary','http://www.dict.org/bin/Dict?Database=*&Form=Dict1&Strategy=*&Query=$1',0);
+CALL add_interwiki('disinfopedia','http://www.disinfopedia.org/wiki.phtml?title=$1',0);
+CALL add_interwiki('diveintoosx','http://diveintoosx.org/$1',0);
+CALL add_interwiki('docbook','http://docbook.org/wiki/moin.cgi/$1',0);
+CALL add_interwiki('dolphinwiki','http://www.object-arts.com/wiki/html/Dolphin/$1',0);
+CALL add_interwiki('drumcorpswiki','http://www.drumcorpswiki.com/index.php/$1',0);
+CALL add_interwiki('dwjwiki','http://www.suberic.net/cgi-bin/dwj/wiki.cgi?$1',0);
+CALL add_interwiki('eĉei','http://www.ikso.net/cgi-bin/wiki.pl?$1',0);
+CALL add_interwiki('echei','http://www.ikso.net/cgi-bin/wiki.pl?$1',0);
+CALL add_interwiki('ecxei','http://www.ikso.net/cgi-bin/wiki.pl?$1',0);
+CALL add_interwiki('efnetceewiki','http://purl.net/wiki/c/$1',0);
+CALL add_interwiki('efnetcppwiki','http://purl.net/wiki/cpp/$1',0);
+CALL add_interwiki('efnetpythonwiki','http://purl.net/wiki/python/$1',0);
+CALL add_interwiki('efnetxmlwiki','http://purl.net/wiki/xml/$1',0);
+CALL add_interwiki('eljwiki','http://elj.sourceforge.net/phpwiki/index.php/$1',0);
+CALL add_interwiki('emacswiki','http://www.emacswiki.org/cgi-bin/wiki.pl?$1',0);
+CALL add_interwiki('elibre','http://enciclopedia.us.es/index.php/$1',0);
+CALL add_interwiki('eokulturcentro','http://esperanto.toulouse.free.fr/wakka.php?wiki=$1',0);
+CALL add_interwiki('evowiki','http://www.evowiki.org/index.php/$1',0);
+CALL add_interwiki('finalempire','http://final-empire.sourceforge.net/cgi-bin/wiki.pl?$1',0);
+CALL add_interwiki('firstwiki','http://firstwiki.org/index.php/$1',0);
+CALL add_interwiki('foldoc','http://www.foldoc.org/foldoc/foldoc.cgi?$1',0);
+CALL add_interwiki('foxwiki','http://fox.wikis.com/wc.dll?Wiki~$1',0);
+CALL add_interwiki('fr.be','http://fr.wikinations.be/$1',0);
+CALL add_interwiki('fr.ca','http://fr.ca.wikinations.org/$1',0);
+CALL add_interwiki('fr.fr','http://fr.fr.wikinations.org/$1',0);
+CALL add_interwiki('fr.org','http://fr.wikinations.org/$1',0);
+CALL add_interwiki('freebsdman','http://www.FreeBSD.org/cgi/man.cgi?apropos=1&query=$1',0);
+CALL add_interwiki('gamewiki','http://gamewiki.org/wiki/index.php/$1',0);
+CALL add_interwiki('gej','http://www.esperanto.de/cgi-bin/aktivikio/wiki.pl?$1',0);
+CALL add_interwiki('gentoo-wiki','http://gentoo-wiki.com/$1',0);
+CALL add_interwiki('globalvoices','http://cyber.law.harvard.edu/dyn/globalvoices/wiki/$1',0);
+CALL add_interwiki('gmailwiki','http://www.gmailwiki.com/index.php/$1',0);
+CALL add_interwiki('google','http://www.google.com/search?q=$1',0);
+CALL add_interwiki('googlegroups','http://groups.google.com/groups?q=$1',0);
+CALL add_interwiki('gotamac','http://www.got-a-mac.org/$1',0);
+CALL add_interwiki('greencheese','http://www.greencheese.org/$1',0);
+CALL add_interwiki('hammondwiki','http://www.dairiki.org/HammondWiki/index.php3?$1',0);
+CALL add_interwiki('haribeau','http://wiki.haribeau.de/cgi-bin/wiki.pl?$1',0);
+CALL add_interwiki('hewikisource','http://he.wikisource.org/wiki/$1',1);
+CALL add_interwiki('herzkinderwiki','http://www.herzkinderinfo.de/Mediawiki/index.php/$1',0);
+CALL add_interwiki('hrwiki','http://www.hrwiki.org/index.php/$1',0);
+CALL add_interwiki('iawiki','http://www.IAwiki.net/$1',0);
+CALL add_interwiki('imdb','http://us.imdb.com/Title?$1',0);
+CALL add_interwiki('infosecpedia','http://www.infosecpedia.org/pedia/index.php/$1',0);
+CALL add_interwiki('jargonfile','http://sunir.org/apps/meta.pl?wiki=JargonFile&redirect=$1',0);
+CALL add_interwiki('jefo','http://www.esperanto-jeunes.org/vikio/index.php?$1',0);
+CALL add_interwiki('jiniwiki','http://www.cdegroot.com/cgi-bin/jini?$1',0);
+CALL add_interwiki('jspwiki','http://www.ecyrd.com/JSPWiki/Wiki.jsp?page=$1',0);
+CALL add_interwiki('kerimwiki','http://wiki.oxus.net/$1',0);
+CALL add_interwiki('kmwiki','http://www.voght.com/cgi-bin/pywiki?$1',0);
+CALL add_interwiki('knowhow','http://www2.iro.umontreal.ca/~paquetse/cgi-bin/wiki.cgi?$1',0);
+CALL add_interwiki('lanifexwiki','http://opt.lanifex.com/cgi-bin/wiki.pl?$1',0);
+CALL add_interwiki('lasvegaswiki','http://wiki.gmnow.com/index.php/$1',0);
+CALL add_interwiki('linuxwiki','http://www.linuxwiki.de/$1',0);
+CALL add_interwiki('lojban','http://www.lojban.org/tiki/tiki-index.php?page=$1',0);
+CALL add_interwiki('lqwiki','http://wiki.linuxquestions.org/wiki/$1',0);
+CALL add_interwiki('lugkr','http://lug-kr.sourceforge.net/cgi-bin/lugwiki.pl?$1',0);
+CALL add_interwiki('lutherwiki','http://www.lutheranarchives.com/mw/index.php/$1',0);
+CALL add_interwiki('mathsongswiki','http://SeedWiki.com/page.cfm?wikiid=237&doc=$1',0);
+CALL add_interwiki('mbtest','http://www.usemod.com/cgi-bin/mbtest.pl?$1',0);
+CALL add_interwiki('meatball','http://www.usemod.com/cgi-bin/mb.pl?$1',0);
+CALL add_interwiki('mediazilla','http://bugzilla.wikipedia.org/$1',1);
+CALL add_interwiki('memoryalpha','http://www.memory-alpha.org/en/index.php/$1',0);
+CALL add_interwiki('metaweb','http://www.metaweb.com/wiki/wiki.phtml?title=$1',0);
+CALL add_interwiki('metawiki','http://sunir.org/apps/meta.pl?$1',0);
+CALL add_interwiki('metawikipedia','http://meta.wikimedia.org/wiki/$1',0);
+CALL add_interwiki('moinmoin','http://purl.net/wiki/moin/$1',0);
+CALL add_interwiki('mozillawiki','http://wiki.mozilla.org/index.php/$1',0);
+CALL add_interwiki('muweb','http://www.dunstable.com/scripts/MuWebWeb?$1',0);
+CALL add_interwiki('netvillage','http://www.netbros.com/?$1',0);
+CALL add_interwiki('oeis','http://www.research.att.com/cgi-bin/access.cgi/as/njas/sequences/eisA.cgi?Anum=$1',0);
+CALL add_interwiki('openfacts','http://openfacts.berlios.de/index.phtml?title=$1',0);
+CALL add_interwiki('openwiki','http://openwiki.com/?$1',0);
+CALL add_interwiki('opera7wiki','http://nontroppo.org/wiki/$1',0);
+CALL add_interwiki('orgpatterns','http://www.bell-labs.com/cgi-user/OrgPatterns/OrgPatterns?$1',0);
+CALL add_interwiki('osi reference model','http://wiki.tigma.ee/',0);
+CALL add_interwiki('pangalacticorg','http://www.pangalactic.org/Wiki/$1',0);
+CALL add_interwiki('personaltelco','http://www.personaltelco.net/index.cgi/$1',0);
+CALL add_interwiki('patwiki','http://gauss.ffii.org/$1',0);
+CALL add_interwiki('phpwiki','http://phpwiki.sourceforge.net/phpwiki/index.php?$1',0);
+CALL add_interwiki('pikie','http://pikie.darktech.org/cgi/pikie?$1',0);
+CALL add_interwiki('pmeg','http://www.bertilow.com/pmeg/$1.php',0);
+CALL add_interwiki('ppr','http://c2.com/cgi/wiki?$1',0);
+CALL add_interwiki('purlnet','http://purl.oclc.org/NET/$1',0);
+CALL add_interwiki('pythoninfo','http://www.python.org/cgi-bin/moinmoin/$1',0);
+CALL add_interwiki('pythonwiki','http://www.pythonwiki.de/$1',0);
+CALL add_interwiki('pywiki','http://www.voght.com/cgi-bin/pywiki?$1',0);
+CALL add_interwiki('raec','http://www.raec.clacso.edu.ar:8080/raec/Members/raecpedia/$1',0);
+CALL add_interwiki('revo','http://purl.org/NET/voko/revo/art/$1.html',0);
+CALL add_interwiki('rfc','http://www.rfc-editor.org/rfc/rfc$1.txt',0);
+CALL add_interwiki('s23wiki','http://is-root.de/wiki/index.php/$1',0);
+CALL add_interwiki('scoutpedia','http://www.scoutpedia.info/index.php/$1',0);
+CALL add_interwiki('seapig','http://www.seapig.org/$1',0);
+CALL add_interwiki('seattlewiki','http://seattlewiki.org/wiki/$1',0);
+CALL add_interwiki('seattlewireless','http://seattlewireless.net/?$1',0);
+CALL add_interwiki('seeds','http://www.IslandSeeds.org/wiki/$1',0);
+CALL add_interwiki('senseislibrary','http://senseis.xmp.net/?$1',0);
+CALL add_interwiki('shakti','http://cgi.algonet.se/htbin/cgiwrap/pgd/ShaktiWiki/$1',0);
+CALL add_interwiki('slashdot','http://slashdot.org/article.pl?sid=$1',0);
+CALL add_interwiki('smikipedia','http://www.smikipedia.org/$1',0);
+CALL add_interwiki('sockwiki','http://wiki.socklabs.com/$1',0);
+CALL add_interwiki('sourceforge','http://sourceforge.net/$1',0);
+CALL add_interwiki('squeak','http://minnow.cc.gatech.edu/squeak/$1',0);
+CALL add_interwiki('strikiwiki','http://ch.twi.tudelft.nl/~mostert/striki/teststriki.pl?$1',0);
+CALL add_interwiki('susning','http://www.susning.nu/$1',0);
+CALL add_interwiki('svgwiki','http://www.protocol7.com/svg-wiki/default.asp?$1',0);
+CALL add_interwiki('tavi','http://tavi.sourceforge.net/$1',0);
+CALL add_interwiki('tejo','http://www.tejo.org/vikio/$1',0);
+CALL add_interwiki('terrorwiki','http://www.liberalsagainstterrorism.com/wiki/index.php/$1',0);
+CALL add_interwiki('tmbw','http://www.tmbw.net/wiki/index.php/$1',0);
+CALL add_interwiki('tmnet','http://www.technomanifestos.net/?$1',0);
+CALL add_interwiki('tmwiki','http://www.EasyTopicMaps.com/?page=$1',0);
+CALL add_interwiki('turismo','http://www.tejo.org/turismo/$1',0);
+CALL add_interwiki('theopedia','http://www.theopedia.com/$1',0);
+CALL add_interwiki('twiki','http://twiki.org/cgi-bin/view/$1',0);
+CALL add_interwiki('twistedwiki','http://purl.net/wiki/twisted/$1',0);
+CALL add_interwiki('uea','http://www.tejo.org/uea/$1',0);
+CALL add_interwiki('unreal','http://wiki.beyondunreal.com/wiki/$1',0);
+CALL add_interwiki('ursine','http://ursine.ca/$1',0);
+CALL add_interwiki('usej','http://www.tejo.org/usej/$1',0);
+CALL add_interwiki('usemod','http://www.usemod.com/cgi-bin/wiki.pl?$1',0);
+CALL add_interwiki('visualworks','http://wiki.cs.uiuc.edu/VisualWorks/$1',0);
+CALL add_interwiki('warpedview','http://www.warpedview.com/index.php/$1',0);
+CALL add_interwiki('webdevwikinl','http://www.promo-it.nl/WebDevWiki/index.php?page=$1',0);
+CALL add_interwiki('webisodes','http://www.webisodes.org/$1',0);
+CALL add_interwiki('webseitzwiki','http://webseitz.fluxent.com/wiki/$1',0);
+CALL add_interwiki('why','http://clublet.com/c/c/why?$1',0);
+CALL add_interwiki('wiki','http://c2.com/cgi/wiki?$1',0);
+CALL add_interwiki('wikia','http://www.wikia.com/wiki/index.php/$1',0);
+CALL add_interwiki('wikibooks','http://en.wikibooks.org/wiki/$1',1);
+CALL add_interwiki('wikicities','http://www.wikicities.com/index.php/$1',0);
+CALL add_interwiki('wikif1','http://www.wikif1.org/$1',0);
+CALL add_interwiki('wikinfo','http://www.wikinfo.org/wiki.php?title=$1',0);
+CALL add_interwiki('wikimedia','http://wikimediafoundation.org/wiki/$1',0);
+CALL add_interwiki('wikiquote','http://en.wikiquote.org/wiki/$1',1);
+CALL add_interwiki('wikinews','http://en.wikinews.org/wiki/$1',0);
+CALL add_interwiki('wikisource','http://sources.wikipedia.org/wiki/$1',1);
+CALL add_interwiki('wikispecies','http://species.wikipedia.org/wiki/$1',1);
+CALL add_interwiki('wikitravel','http://wikitravel.org/en/$1',0);
+CALL add_interwiki('wikiworld','http://WikiWorld.com/wiki/index.php/$1',0);
+CALL add_interwiki('wiktionary','http://en.wiktionary.org/wiki/$1',1);
+CALL add_interwiki('wlug','http://www.wlug.org.nz/$1',0);
+CALL add_interwiki('wlwiki','http://winslowslair.supremepixels.net/wiki/index.php/$1',0);
+CALL add_interwiki('ypsieyeball','http://sknkwrks.dyndns.org:1957/writewiki/wiki.pl?$1',0);
+CALL add_interwiki('zwiki','http://www.zwiki.org/$1',0);
+CALL add_interwiki('zzz wiki','http://wiki.zzz.ee/',0);
+CALL add_interwiki('wikt','http://en.wiktionary.org/wiki/$1',1);
+
diff --git a/maintenance/oracle/tables.sql b/maintenance/oracle/tables.sql
new file mode 100644
index 00000000..6733f950
--- /dev/null
+++ b/maintenance/oracle/tables.sql
@@ -0,0 +1,333 @@
+-- SQL to create the initial tables for the MediaWiki database.
+-- This is read and executed by the install script; you should
+-- not have to run it by itself unless doing a manual install.
+
+CREATE SEQUENCE user_user_id_seq;
+
+CREATE TABLE "user" (
+ user_id NUMBER(5) NOT NULL PRIMARY KEY,
+ user_name VARCHAR2(255) DEFAULT '' NOT NULL,
+ user_real_name VARCHAR2(255) DEFAULT '',
+ user_password VARCHAR2(128) DEFAULT '',
+ user_newpassword VARCHAR2(128) default '',
+ user_email VARCHAR2(255) default '',
+ user_options CLOB default '',
+ user_touched TIMESTAMP WITH TIME ZONE,
+ user_token CHAR(32) default '',
+ user_email_authenticated TIMESTAMP WITH TIME ZONE DEFAULT NULL,
+ user_email_token CHAR(32),
+ user_email_token_expires TIMESTAMP WITH TIME ZONE DEFAULT NULL
+);
+CREATE UNIQUE INDEX user_name_idx ON "user" (user_name);
+CREATE INDEX user_email_token_idx ON "user" (user_email_token);
+
+CREATE TABLE user_groups (
+ ug_user NUMBER(5) DEFAULT '0' NOT NULL
+ REFERENCES "user" (user_id)
+ ON DELETE CASCADE,
+ ug_group VARCHAR2(16) NOT NULL,
+ CONSTRAINT user_groups_pk PRIMARY KEY (ug_user, ug_group)
+);
+CREATE INDEX user_groups_group_idx ON user_groups(ug_group);
+
+CREATE TABLE user_newtalk (
+ user_id NUMBER(5) DEFAULT 0 NOT NULL,
+ user_ip VARCHAR2(40) DEFAULT '' NOT NULL
+);
+CREATE INDEX user_newtalk_id_idx ON user_newtalk(user_id);
+CREATE INDEX user_newtalk_ip_idx ON user_newtalk(user_ip);
+
+CREATE SEQUENCE page_page_id_seq;
+CREATE TABLE page (
+ page_id NUMBER(8) NOT NULL PRIMARY KEY,
+ page_namespace NUMBER(5) NOT NULL,
+ page_title VARCHAR(255) NOT NULL,
+ page_restrictions CLOB DEFAULT '',
+ page_counter NUMBER(20) DEFAULT 0 NOT NULL,
+ page_is_redirect NUMBER(1) DEFAULT 0 NOT NULL,
+ page_is_new NUMBER(1) DEFAULT 0 NOT NULL,
+ page_random NUMBER(25, 24) NOT NULL,
+ page_touched TIMESTAMP WITH TIME ZONE,
+ page_latest NUMBER(8) NOT NULL,
+ page_len NUMBER(8) DEFAULT 0
+);
+CREATE UNIQUE INDEX page_id_namespace_title_idx ON page(page_namespace, page_title);
+CREATE INDEX page_random_idx ON page(page_random);
+CREATE INDEX page_len_idx ON page(page_len);
+
+CREATE SEQUENCE rev_rev_id_val;
+CREATE TABLE revision (
+ rev_id NUMBER(8) NOT NULL,
+ rev_page NUMBER(8) NOT NULL
+ REFERENCES page (page_id)
+ ON DELETE CASCADE,
+ rev_text_id NUMBER(8) NOT NULL,
+ rev_comment CLOB,
+ rev_user NUMBER(8) DEFAULT 0 NOT NULL,
+ rev_user_text VARCHAR2(255) DEFAULT '' NOT NULL,
+ rev_timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
+ rev_minor_edit NUMBER(1) DEFAULT 0 NOT NULL,
+ rev_deleted NUMBER(1) DEFAULT 0 NOT NULL,
+ CONSTRAINT revision_pk PRIMARY KEY (rev_page, rev_id)
+);
+
+CREATE UNIQUE INDEX rev_id_idx ON revision(rev_id);
+CREATE INDEX rev_timestamp_idx ON revision(rev_timestamp);
+CREATE INDEX rev_page_timestamp_idx ON revision(rev_page, rev_timestamp);
+CREATE INDEX rev_user_timestamp_idx ON revision(rev_user, rev_timestamp);
+CREATE INDEX rev_usertext_timestamp_idx ON revision(rev_user_text, rev_timestamp);
+
+CREATE SEQUENCE text_old_id_val;
+
+CREATE TABLE text (
+ old_id NUMBER(8) NOT NULL,
+ old_text CLOB,
+ old_flags CLOB,
+ CONSTRAINT text_pk PRIMARY KEY (old_id)
+);
+
+CREATE TABLE archive (
+ ar_namespace NUMBER(5) NOT NULL,
+ ar_title VARCHAR2(255) NOT NULL,
+ ar_text CLOB,
+ ar_comment CLOB,
+ ar_user NUMBER(8),
+ ar_user_text VARCHAR2(255) NOT NULL,
+ ar_timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
+ ar_minor_edit NUMBER(1) DEFAULT 0 NOT NULL,
+ ar_flags CLOB,
+ ar_rev_id NUMBER(8),
+ ar_text_id NUMBER(8)
+);
+CREATE INDEX archive_name_title_timestamp ON archive(ar_namespace,ar_title,ar_timestamp);
+
+CREATE TABLE pagelinks (
+ pl_from NUMBER(8) NOT NULL
+ REFERENCES page(page_id)
+ ON DELETE CASCADE,
+ pl_namespace NUMBER(4) DEFAULT 0 NOT NULL,
+ pl_title VARCHAR2(255) NOT NULL
+);
+CREATE UNIQUE INDEX pl_from ON pagelinks(pl_from, pl_namespace, pl_title);
+CREATE INDEX pl_namespace ON pagelinks(pl_namespace, pl_title);
+
+CREATE TABLE imagelinks (
+ il_from NUMBER(8) NOT NULL REFERENCES page(page_id) ON DELETE CASCADE,
+ il_to VARCHAR2(255) NOT NULL
+);
+CREATE UNIQUE INDEX il_from ON imagelinks(il_from, il_to);
+CREATE INDEX il_to ON imagelinks(il_to);
+
+CREATE TABLE categorylinks (
+ cl_from NUMBER(8) NOT NULL REFERENCES page(page_id) ON DELETE CASCADE,
+ cl_to VARCHAR2(255) NOT NULL,
+ cl_sortkey VARCHAR2(86) default '',
+ cl_timestamp TIMESTAMP WITH TIME ZONE NOT NULL
+);
+CREATE UNIQUE INDEX cl_from ON categorylinks(cl_from, cl_to);
+CREATE INDEX cl_sortkey ON categorylinks(cl_to, cl_sortkey);
+CREATE INDEX cl_timestamp ON categorylinks(cl_to, cl_timestamp);
+
+--
+-- Contains a single row with some aggregate info
+-- on the state of the site.
+--
+CREATE TABLE site_stats (
+ ss_row_id NUMBER(8) NOT NULL,
+ ss_total_views NUMBER(20) default 0,
+ ss_total_edits NUMBER(20) default 0,
+ ss_good_articles NUMBER(20) default 0,
+ ss_total_pages NUMBER(20) default -1,
+ ss_users NUMBER(20) default -1,
+ ss_admins NUMBER(10) default -1
+);
+CREATE UNIQUE INDEX ss_row_id ON site_stats(ss_row_id);
+
+--
+-- Stores an ID for every time any article is visited;
+-- depending on $wgHitcounterUpdateFreq, it is
+-- periodically cleared and the page_counter column
+-- in the page table updated for all articles
+-- that have been visited.
+--
+CREATE TABLE hitcounter (
+ hc_id NUMBER NOT NULL
+);
+
+--
+-- The internet is full of jerks, alas. Sometimes it's handy
+-- to block a vandal or troll account.
+--
+CREATE SEQUENCE ipblocks_ipb_id_val;
+CREATE TABLE ipblocks (
+ ipb_id NUMBER(8) NOT NULL,
+ ipb_address VARCHAR2(40),
+ ipb_user NUMBER(8),
+ ipb_by NUMBER(8) NOT NULL
+ REFERENCES "user" (user_id)
+ ON DELETE CASCADE,
+ ipb_reason CLOB,
+ ipb_timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
+ ipb_auto NUMBER(1) DEFAULT 0 NOT NULL,
+ ipb_expiry TIMESTAMP WITH TIME ZONE,
+ CONSTRAINT ipblocks_pk PRIMARY KEY (ipb_id)
+);
+CREATE INDEX ipb_address ON ipblocks(ipb_address);
+CREATE INDEX ipb_user ON ipblocks(ipb_user);
+
+CREATE TABLE image (
+ img_name VARCHAR2(255) NOT NULL,
+ img_size NUMBER(8) NOT NULL,
+ img_width NUMBER(5) NOT NULL,
+ img_height NUMBER(5) NOT NULL,
+ img_metadata CLOB,
+ img_bits NUMBER(3),
+ img_media_type VARCHAR2(10),
+ img_major_mime VARCHAR2(12) DEFAULT 'unknown',
+ img_minor_mime VARCHAR2(32) DEFAULT 'unknown',
+ img_description CLOB NOT NULL,
+ img_user NUMBER(8) NOT NULL REFERENCES "user"(user_id) ON DELETE CASCADE,
+ img_user_text VARCHAR2(255) NOT NULL,
+ img_timestamp TIMESTAMP WITH TIME ZONE,
+ CONSTRAINT image_pk PRIMARY KEY (img_name)
+);
+CREATE INDEX img_size_idx ON image(img_size);
+CREATE INDEX img_timestamp_idx ON image(img_timestamp);
+
+CREATE TABLE oldimage (
+ oi_name VARCHAR2(255) NOT NULL,
+ oi_archive_name VARCHAR2(255) NOT NULL,
+ oi_size NUMBER(8) NOT NULL,
+ oi_width NUMBER(5) NOT NULL,
+ oi_height NUMBER(5) NOT NULL,
+ oi_bits NUMBER(3) NOT NULL,
+ oi_description CLOB,
+ oi_user NUMBER(8) NOT NULL REFERENCES "user"(user_id),
+ oi_user_text VARCHAR2(255) NOT NULL,
+ oi_timestamp TIMESTAMP WITH TIME ZONE NOT NULL
+);
+CREATE INDEX oi_name ON oldimage (oi_name);
+
+CREATE SEQUENCE rc_rc_id_seq;
+CREATE TABLE recentchanges (
+ rc_id NUMBER(8) NOT NULL,
+ rc_timestamp TIMESTAMP WITH TIME ZONE,
+ rc_cur_time TIMESTAMP WITH TIME ZONE,
+ rc_user NUMBER(8) DEFAULT 0 NOT NULL,
+ rc_user_text VARCHAR2(255),
+ rc_namespace NUMBER(4) DEFAULT 0 NOT NULL,
+ rc_title VARCHAR2(255) NOT NULL,
+ rc_comment VARCHAR2(255),
+ rc_minor NUMBER(3) DEFAULT 0 NOT NULL,
+ rc_bot NUMBER(3) DEFAULT 0 NOT NULL,
+ rc_new NUMBER(3) DEFAULT 0 NOT NULL,
+ rc_cur_id NUMBER(8),
+ rc_this_oldid NUMBER(8) NOT NULL,
+ rc_last_oldid NUMBER(8) NOT NULL,
+ rc_type NUMBER(3) DEFAULT 0 NOT NULL,
+ rc_moved_to_ns NUMBER(3),
+ rc_moved_to_title VARCHAR2(255),
+ rc_patrolled NUMBER(3) DEFAULT 0 NOT NULL,
+ rc_ip VARCHAR2(40),
+ CONSTRAINT rc_pk PRIMARY KEY (rc_id)
+);
+CREATE INDEX rc_timestamp ON recentchanges (rc_timestamp);
+CREATE INDEX rc_namespace_title ON recentchanges(rc_namespace, rc_title);
+CREATE INDEX rc_cur_id ON recentchanges(rc_cur_id);
+CREATE INDEX new_name_timestamp ON recentchanges(rc_new, rc_namespace, rc_timestamp);
+CREATE INDEX rc_ip ON recentchanges(rc_ip);
+
+CREATE TABLE watchlist (
+ wl_user NUMBER(8) NOT NULL
+ REFERENCES "user"(user_id)
+ ON DELETE CASCADE,
+ wl_namespace NUMBER(8) DEFAULT 0 NOT NULL,
+ wl_title VARCHAR2(255) NOT NULL,
+ wl_notificationtimestamp TIMESTAMP WITH TIME ZONE DEFAULT NULL
+);
+CREATE UNIQUE INDEX wl_user_namespace_title ON watchlist
+ (wl_user, wl_namespace, wl_title);
+CREATE INDEX wl_namespace_title ON watchlist(wl_namespace, wl_title);
+
+--
+-- Used by the texvc math-rendering extension to keep track
+-- of previously-rendered items.
+--
+CREATE TABLE math (
+ math_inputhash VARCHAR2(16) NOT NULL UNIQUE,
+ math_outputhash VARCHAR2(16) NOT NULL,
+ math_html_conservativeness NUMBER(1) NOT NULL,
+ math_html CLOB,
+ math_mathml CLOB
+);
+
+--
+-- Recognized interwiki link prefixes
+--
+CREATE TABLE interwiki (
+ iw_prefix VARCHAR2(32) NOT NULL UNIQUE,
+ iw_url VARCHAR2(127) NOT NULL,
+ iw_local NUMBER(1) NOT NULL,
+ iw_trans NUMBER(1) DEFAULT 0 NOT NULL
+);
+
+CREATE TABLE querycache (
+ qc_type VARCHAR2(32) NOT NULL,
+ qc_value NUMBER(5) DEFAULT 0 NOT NULL,
+ qc_namespace NUMBER(4) DEFAULT 0 NOT NULL,
+ qc_title VARCHAR2(255)
+);
+CREATE INDEX querycache_type_value ON querycache(qc_type, qc_value);
+
+--
+-- For a few generic cache operations if not using Memcached
+--
+CREATE TABLE objectcache (
+ keyname CHAR(255) DEFAULT '',
+ value CLOB,
+ exptime TIMESTAMP WITH TIME ZONE
+);
+CREATE UNIQUE INDEX oc_keyname_idx ON objectcache(keyname);
+CREATE INDEX oc_exptime_idx ON objectcache(exptime);
+
+CREATE TABLE logging (
+ log_type VARCHAR2(10) NOT NULL,
+ log_action VARCHAR2(10) NOT NULL,
+ log_timestamp TIMESTAMP WITH TIME ZONE NOT NULL,
+ log_user NUMBER(8) REFERENCES "user"(user_id),
+ log_namespace NUMBER(4),
+ log_title VARCHAR2(255) NOT NULL,
+ log_comment VARCHAR2(255),
+ log_params CLOB
+);
+CREATE INDEX logging_type_name ON logging(log_type, log_timestamp);
+CREATE INDEX logging_user_time ON logging(log_user, log_timestamp);
+CREATE INDEX logging_page_time ON logging(log_namespace, log_title, log_timestamp);
+
+-- Hold group name and description
+--CREATE TABLE /*$wgDBprefix*/groups (
+-- gr_id int(5) unsigned NOT NULL auto_increment,
+-- gr_name varchar(50) NOT NULL default '',
+-- gr_description varchar(255) NOT NULL default '',
+-- gr_rights tinyblob,
+-- PRIMARY KEY (gr_id)
+--
+--) TYPE=InnoDB;
+
+CREATE OR REPLACE PROCEDURE add_user_right (name VARCHAR2, new_right VARCHAR2) AS
+ user_id "user".user_id%TYPE;;
+ user_is_missing EXCEPTION;;
+BEGIN
+ SELECT user_id INTO user_id FROM "user" WHERE user_name = name;;
+ INSERT INTO user_groups (ug_user, ug_group) VALUES(user_id, new_right);;
+EXCEPTION
+ WHEN NO_DATA_FOUND THEN
+ DBMS_OUTPUT.PUT_LINE('The specified user does not exist.');;
+END add_user_right;;
+;
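+
+-- For illustration only: granting a right to a hypothetical user:
+--   CALL add_user_right('ExampleUser', 'sysop');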
+
+CREATE OR REPLACE PROCEDURE add_interwiki (prefix VARCHAR2, url VARCHAR2, is_local NUMBER) AS
+BEGIN
+ INSERT INTO interwiki (iw_prefix, iw_url, iw_local) VALUES(prefix, url, is_local);;
+END add_interwiki;;
+; \ No newline at end of file
diff --git a/maintenance/orphans.php b/maintenance/orphans.php
new file mode 100644
index 00000000..3bfa79f5
--- /dev/null
+++ b/maintenance/orphans.php
@@ -0,0 +1,207 @@
+<?php
+# Copyright (C) 2005 Brion Vibber <brion@pobox.com>
+# http://www.mediawiki.org/
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# http://www.gnu.org/copyleft/gpl.html
+
+/**
+ * Look for 'orphan' revisions hooked to pages which don't exist
+ * and 'childless' pages with no revisions.
+ * Then, kill the poor widows and orphans.
+ * Man this is depressing.
+ *
+ * @author <brion@pobox.com>
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+$options = array( 'fix' );
+
+/** */
+require_once( 'commandLine.inc' );
+$wgTitle = Title::newFromText( 'Orphan revision cleanup script' );
+
+checkOrphans( isset( $options['fix'] ) );
+checkSeparation( isset( $options['fix'] ) );
+#checkWidows( isset( $options['fix'] ) );
+
+# ------
+
+function checkOrphans( $fix ) {
+ $dbw =& wfGetDB( DB_MASTER );
+ $page = $dbw->tableName( 'page' );
+ $revision = $dbw->tableName( 'revision' );
+
+ if( $fix ) {
+ $dbw->query( "LOCK TABLES $page WRITE, $revision WRITE" );
+ }
+
+ echo "Checking for orphan revision table entries... (this may take a while on a large wiki)\n";
+ $result = $dbw->query( "
+ SELECT *
+ FROM $revision LEFT OUTER JOIN $page ON rev_page=page_id
+ WHERE page_id IS NULL
+ ");
+ $orphans = $dbw->numRows( $result );
+ if( $orphans > 0 ) {
+ global $wgContLang;
+ echo "$orphans orphan revisions...\n";
+ printf( "%10s %10s %14s %20s %s\n", 'rev_id', 'rev_page', 'rev_timestamp', 'rev_user_text', 'rev_comment' );
+ while( $row = $dbw->fetchObject( $result ) ) {
+ $comment = ( $row->rev_comment == '' )
+ ? ''
+ : '(' . $wgContLang->truncate( $row->rev_comment, 40, '...' ) . ')';
+ printf( "%10d %10d %14s %20s %s\n",
+ $row->rev_id,
+ $row->rev_page,
+ $row->rev_timestamp,
+ $wgContLang->truncate( $row->rev_user_text, 17, '...' ),
+ $comment );
+ if( $fix ) {
+ $dbw->delete( 'revision', array( 'rev_id' => $row->rev_id ) );
+ }
+ }
+ if( !$fix ) {
+ echo "Run again with --fix to remove these entries automatically.\n";
+ }
+ } else {
+ echo "No orphans! Yay!\n";
+ }
+
+ if( $fix ) {
+ $dbw->query( "UNLOCK TABLES" );
+ }
+}
+
+/**
+ * @todo DON'T USE THIS YET! It will remove entries which have children,
+ * but which aren't properly attached (e.g. if page_latest is bogus
+ * but valid revisions do exist)
+ */
+function checkWidows( $fix ) {
+ $dbw =& wfGetDB( DB_MASTER );
+ $page = $dbw->tableName( 'page' );
+ $revision = $dbw->tableName( 'revision' );
+
+ if( $fix ) {
+ $dbw->query( "LOCK TABLES $page WRITE, $revision WRITE" );
+ }
+
+ echo "\nChecking for childless page table entries... (this may take a while on a large wiki)\n";
+ $result = $dbw->query( "
+ SELECT *
+ FROM $page LEFT OUTER JOIN $revision ON page_latest=rev_id
+ WHERE rev_id IS NULL
+ ");
+ $widows = $dbw->numRows( $result );
+ if( $widows > 0 ) {
+ global $wgContLang;
+ echo "$widows childless pages...\n";
+ printf( "%10s %11s %2s %s\n", 'page_id', 'page_latest', 'ns', 'page_title' );
+ while( $row = $dbw->fetchObject( $result ) ) {
+ printf( "%10d %11d %2d %s\n",
+ $row->page_id,
+ $row->page_latest,
+ $row->page_namespace,
+ $row->page_title );
+ if( $fix ) {
+ $dbw->delete( 'page', array( 'page_id' => $row->page_id ) );
+ }
+ }
+ if( !$fix ) {
+ echo "Run again with --fix to remove these entries automatically.\n";
+ }
+ } else {
+ echo "No childless pages! Yay!\n";
+ }
+
+ if( $fix ) {
+ $dbw->query( "UNLOCK TABLES" );
+ }
+}
+
+
+function checkSeparation( $fix ) {
+ $dbw =& wfGetDB( DB_MASTER );
+ $page = $dbw->tableName( 'page' );
+ $revision = $dbw->tableName( 'revision' );
+ $text = $dbw->tableName( 'text' );
+
+ if( $fix ) {
+ $dbw->query( "LOCK TABLES $page WRITE, $revision WRITE, $text WRITE" );
+ }
+
+ echo "\nChecking for pages whose page_latest links are incorrect... (this may take a while on a large wiki)\n";
+ $result = $dbw->query( "
+ SELECT *
+ FROM $page LEFT OUTER JOIN $revision ON page_latest=rev_id
+ ");
+ $found = 0;
+ while( $row = $dbw->fetchObject( $result ) ) {
+ $result2 = $dbw->query( "
+ SELECT MAX(rev_timestamp) as max_timestamp
+ FROM $revision
+ WHERE rev_page=$row->page_id
+ " );
+ $row2 = $dbw->fetchObject( $result2 );
+ $dbw->freeResult( $result2 );
+ if( $row2 ) {
+ if( $row->rev_timestamp != $row2->max_timestamp ) {
+ if( $found == 0 ) {
+ printf( "%10s %10s %14s %14s\n",
+ 'page_id', 'rev_id', 'timestamp', 'max timestamp' );
+ }
+ ++$found;
+ printf( "%10d %10d %14s %14s\n",
+ $row->page_id,
+ $row->page_latest,
+ $row->rev_timestamp,
+ $row2->max_timestamp );
+ if( $fix ) {
+				# Find the revision id bearing the newest timestamp and repoint the page at it.
+ $maxId = $dbw->selectField(
+ 'revision',
+ 'rev_id',
+ array(
+ 'rev_page' => $row->page_id,
+ 'rev_timestamp' => $row2->max_timestamp ) );
+ echo "... updating to revision $maxId\n";
+ $maxRev = Revision::newFromId( $maxId );
+ $title = Title::makeTitle( $row->page_namespace, $row->page_title );
+ $article = new Article( $title );
+ $article->updateRevisionOn( $dbw, $maxRev );
+ }
+ }
+ } else {
+ echo "wtf\n";
+ }
+ }
+
+ if( $found ) {
+ echo "Found $found pages with incorrect latest revision.\n";
+ } else {
+ echo "No pages with incorrect latest revision. Yay!\n";
+ }
+ if( !$fix && $found > 0 ) {
+ echo "Run again with --fix to remove these entries automatically.\n";
+ }
+
+ if( $fix ) {
+ $dbw->query( "UNLOCK TABLES" );
+ }
+}
+
+?> \ No newline at end of file
diff --git a/maintenance/ourusers.php b/maintenance/ourusers.php
new file mode 100644
index 00000000..0d625571
--- /dev/null
+++ b/maintenance/ourusers.php
@@ -0,0 +1,121 @@
+<?php
+/**
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
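+// PHP's backtick operator executes a shell command; these presumably invoke
+// site-local helper scripts that print each account's password (an assumption
+// from context -- the scripts themselves are not part of this tree).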
+$wikiuser_pass = `wikiuser_pass`;
+$wikiadmin_pass = `wikiadmin_pass`;
+$wikisql_pass = `wikisql_pass`;
+
+if ( @$argv[1] == 'yaseo' ) {
+ $hosts = array(
+ 'localhost',
+ '211.115.107.158',
+ '211.115.107.159',
+ '211.115.107.160',
+ '211.115.107.138',
+ '211.115.107.139',
+ '211.115.107.140',
+ '211.115.107.141',
+ '211.115.107.142',
+ '211.115.107.143',
+ '211.115.107.144',
+ '211.115.107.145',
+ '211.115.107.146',
+ '211.115.107.147',
+ '211.115.107.148',
+ '211.115.107.149',
+ '211.115.107.150',
+ '211.115.107.152',
+ '211.115.107.153',
+ '211.115.107.154',
+ '211.115.107.155',
+ '211.115.107.156',
+ '211.115.107.157',
+ );
+} else {
+ $hosts = array(
+ 'localhost',
+ '207.142.131.194',
+ '207.142.131.195',
+ '207.142.131.196',
+ '207.142.131.197',
+ '207.142.131.198',
+ '207.142.131.199',
+ '207.142.131.221',
+ '207.142.131.226',
+ '207.142.131.227',
+ '207.142.131.228',
+ '207.142.131.229',
+ '207.142.131.230',
+ '207.142.131.231',
+ '207.142.131.232',
+ '207.142.131.233',
+ '207.142.131.234',
+ '207.142.131.237',
+ '207.142.131.238',
+ '207.142.131.239',
+ '207.142.131.243',
+ '207.142.131.244',
+ '207.142.131.249',
+ '207.142.131.250',
+ '207.142.131.216',
+ '10.0.%',
+ );
+}
+
+$databases = array(
+ '%wikibooks',
+ '%wiki',
+ '%wikiquote',
+ '%wiktionary',
+ '%wikisource',
+ '%wikinews',
+ '%wikiversity',
+ '%wikimedia',
+);
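+// In GRANT statements MySQL treats '%' inside a backquoted database name as a
+// wildcard, so '%wiki' covers enwiki, frwiki, and so on.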
+
+foreach( $hosts as $host ) {
+ print "--\n-- $host\n--\n\n-- wikiuser\n\n";
+ print "GRANT REPLICATION CLIENT,PROCESS ON *.* TO 'wikiuser'@'$host' IDENTIFIED BY '$wikiuser_pass';\n";
+ print "GRANT ALL PRIVILEGES ON `boardvote`.* TO 'wikiuser'@'$host' IDENTIFIED BY '$wikiuser_pass';\n";
+ print "GRANT ALL PRIVILEGES ON `boardvote2005`.* TO 'wikiuser'@'$host' IDENTIFIED BY '$wikiuser_pass';\n";
+ foreach( $databases as $db ) {
+ print "GRANT SELECT, INSERT, UPDATE, DELETE ON `$db`.* TO 'wikiuser'@'$host' IDENTIFIED BY '$wikiuser_pass';\n";
+ }
+
+/*
+ print "\n-- wikisql\n\n";
+ foreach ( $databases as $db ) {
+print <<<EOS
+GRANT SELECT ON `$db`.`old` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`imagelinks` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`image` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`watchlist` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`site_stats` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`archive` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`links` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`ipblocks` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`cur` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT (user_rights, user_id, user_name, user_options) ON `$db`.`user` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`oldimage` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`recentchanges` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`math` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+GRANT SELECT ON `$db`.`brokenlinks` TO 'wikisql'@'$host' IDENTIFIED BY '$wikisql_pass';
+
+EOS;
+ }*/
+
+ print "\n-- wikiadmin\n\n";
+ print "GRANT PROCESS, REPLICATION CLIENT ON *.* TO 'wikiadmin'@'$host' IDENTIFIED BY '$wikiadmin_pass';\n";
+ print "GRANT ALL PRIVILEGES ON `boardvote`.* TO wikiadmin@'$host' IDENTIFIED BY '$wikiadmin_pass';\n";
+ print "GRANT ALL PRIVILEGES ON `boardvote2005`.* TO wikiadmin@'$host' IDENTIFIED BY '$wikiadmin_pass';\n";
+ foreach ( $databases as $db ) {
+ print "GRANT ALL PRIVILEGES ON `$db`.* TO wikiadmin@'$host' IDENTIFIED BY '$wikiadmin_pass';\n";
+ }
+ print "\n";
+}
+?>
diff --git a/maintenance/parserTests.inc b/maintenance/parserTests.inc
new file mode 100644
index 00000000..9f93c4ac
--- /dev/null
+++ b/maintenance/parserTests.inc
@@ -0,0 +1,791 @@
+<?php
+# Copyright (C) 2004 Brion Vibber <brion@pobox.com>
+# http://www.mediawiki.org/
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+# http://www.gnu.org/copyleft/gpl.html
+
+/**
+ * @todo Make this more independent of the configuration (and if possible the database)
+ * @todo document
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+
+/** */
+$options = array( 'quick', 'color', 'quiet', 'help' );
+$optionsWithArgs = array( 'regex' );
+
+require_once( 'commandLine.inc' );
+require_once( "$IP/includes/ObjectCache.php" );
+require_once( "$IP/includes/BagOStuff.php" );
+require_once( "$IP/languages/LanguageUtf8.php" );
+require_once( "$IP/includes/Hooks.php" );
+require_once( "$IP/maintenance/parserTestsParserHook.php" );
+require_once( "$IP/maintenance/parserTestsStaticParserHook.php" );
+require_once( "$IP/maintenance/parserTestsParserTime.php" );
+
+/**
+ * @package MediaWiki
+ * @subpackage Maintenance
+ */
+class ParserTest {
+ /**
+	 * boolean $color whether output should be colorized
+ * @private
+ */
+ var $color;
+
+ /**
+	 * boolean $lightcolor whether output should use light colors
+ * @private
+ */
+ var $lightcolor;
+
+ /**
+ * Sets terminal colorization and diff/quick modes depending on OS and
+ * command-line options (--color and --quick).
+ *
+ * @public
+ */
+ function ParserTest() {
+ global $options;
+
+ # Only colorize output if stdout is a terminal.
+ $this->lightcolor = false;
+ $this->color = !wfIsWindows() && posix_isatty(1);
+
+ if( isset( $options['color'] ) ) {
+ switch( $options['color'] ) {
+ case 'no':
+ $this->color = false;
+ break;
+ case 'light':
+ $this->lightcolor = true;
+ # Fall through
+ case 'yes':
+ default:
+ $this->color = true;
+ break;
+ }
+ }
+
+ $this->showDiffs = !isset( $options['quick'] );
+
+ $this->quiet = isset( $options['quiet'] );
+
+ if (isset($options['regex'])) {
+ $this->regex = $options['regex'];
+ } else {
+ # Matches anything
+ $this->regex = '';
+ }
+
+ $this->hooks = array();
+ }
+
+ /**
+ * Remove last character if it is a newline
+ * @private
+ */
+ function chomp($s) {
+ if (substr($s, -1) === "\n") {
+ return substr($s, 0, -1);
+ }
+ else {
+ return $s;
+ }
+ }
+
+ /**
+ * Run a series of tests listed in the given text file.
+ * Each test consists of a brief description, wikitext input,
+ * and the expected HTML output.
+ *
+ * Prints status updates on stdout and counts up the total
+ * number and percentage of passed tests.
+ *
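+	 * A test block in the file follows this layout (a sketch inferred from
+	 * the section parser below):
+	 *
+	 *   !! test
+	 *   Simple paragraph
+	 *   !! input
+	 *   Hello world
+	 *   !! result
+	 *   <p>Hello world
+	 *   </p>
+	 *   !! end
+	 *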
+ * @param string $filename
+ * @return bool True if passed all tests, false if any tests failed.
+ * @public
+ */
+ function runTestsFromFile( $filename ) {
+ $infile = fopen( $filename, 'rt' );
+ if( !$infile ) {
+ wfDie( "Couldn't open $filename\n" );
+ }
+
+ $data = array();
+ $section = null;
+ $success = 0;
+ $total = 0;
+ $n = 0;
+ while( false !== ($line = fgets( $infile ) ) ) {
+ $n++;
+ if( preg_match( '/^!!\s*(\w+)/', $line, $matches ) ) {
+ $section = strtolower( $matches[1] );
+ if( $section == 'endarticle') {
+ if( !isset( $data['text'] ) ) {
+ wfDie( "'endarticle' without 'text' at line $n\n" );
+ }
+ if( !isset( $data['article'] ) ) {
+ wfDie( "'endarticle' without 'article' at line $n\n" );
+ }
+ $this->addArticle($this->chomp($data['article']), $this->chomp($data['text']), $n);
+ $data = array();
+ $section = null;
+ continue;
+ }
+ if( $section == 'endhooks' ) {
+ if( !isset( $data['hooks'] ) ) {
+ wfDie( "'endhooks' without 'hooks' at line $n\n" );
+ }
+ foreach( explode( "\n", $data['hooks'] ) as $line ) {
+ $line = trim( $line );
+ if( $line ) {
+ $this->requireHook( $line );
+ }
+ }
+ $data = array();
+ $section = null;
+ continue;
+ }
+ if( $section == 'end' ) {
+ if( !isset( $data['test'] ) ) {
+ wfDie( "'end' without 'test' at line $n\n" );
+ }
+ if( !isset( $data['input'] ) ) {
+ wfDie( "'end' without 'input' at line $n\n" );
+ }
+ if( !isset( $data['result'] ) ) {
+ wfDie( "'end' without 'result' at line $n\n" );
+ }
+ if( !isset( $data['options'] ) ) {
+ $data['options'] = '';
+ }
+ else {
+ $data['options'] = $this->chomp( $data['options'] );
+ }
+ if (preg_match('/\\bdisabled\\b/i', $data['options'])
+ || !preg_match("/{$this->regex}/i", $data['test'])) {
+ # disabled test
+ $data = array();
+ $section = null;
+ continue;
+ }
+ if( $this->runTest(
+ $this->chomp( $data['test'] ),
+ $this->chomp( $data['input'] ),
+ $this->chomp( $data['result'] ),
+ $this->chomp( $data['options'] ) ) ) {
+ $success++;
+ }
+ $total++;
+ $data = array();
+ $section = null;
+ continue;
+ }
+ if ( isset ($data[$section] ) ) {
+ wfDie( "duplicate section '$section' at line $n\n" );
+ }
+ $data[$section] = '';
+ continue;
+ }
+ if( $section ) {
+ $data[$section] .= $line;
+ }
+ }
+ if( $total > 0 ) {
+ $ratio = wfPercent( 100 * $success / $total );
+ print $this->termColor( 1 ) . "\nPassed $success of $total tests ($ratio) ";
+ if( $success == $total ) {
+ print $this->termColor( 32 ) . "PASSED!";
+ } else {
+ print $this->termColor( 31 ) . "FAILED!";
+ }
+ print $this->termReset() . "\n";
+ return ($success == $total);
+ } else {
+ wfDie( "No tests found.\n" );
+ }
+ }
+
+ /**
+ * Run a given wikitext input through a freshly-constructed wiki parser,
+ * and compare the output against the expected results.
+ * Prints status and explanatory messages to stdout.
+ *
+	 * @param string $desc Name of the test
+	 * @param string $input Wikitext to try rendering
+	 * @param string $result Expected HTML output
+	 * @param string $opts Whitespace-separated test options
+	 * @return bool True if the test passed
+ */
+ function runTest( $desc, $input, $result, $opts ) {
+ if( !$this->quiet ) {
+ $this->showTesting( $desc );
+ }
+
+ $this->setupGlobals($opts);
+
+ $user =& new User();
+ $options = ParserOptions::newFromUser( $user );
+
+ if (preg_match('/\\bmath\\b/i', $opts)) {
+ # XXX this should probably be done by the ParserOptions
+ $options->setUseTex(true);
+ }
+
+ if (preg_match('/title=\[\[(.*)\]\]/', $opts, $m)) {
+ $titleText = $m[1];
+ }
+ else {
+ $titleText = 'Parser test';
+ }
+
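+		# The 'noxml' option disables the XML well-formedness check applied
+		# when comparing results below.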
+ $noxml = (bool)preg_match( '~\\b noxml \\b~x', $opts );
+
+ $parser =& new Parser();
+ foreach( $this->hooks as $tag => $callback ) {
+ $parser->setHook( $tag, $callback );
+ }
+ wfRunHooks( 'ParserTestParser', array( &$parser ) );
+
+ $title =& Title::makeTitle( NS_MAIN, $titleText );
+
+ if (preg_match('/\\bpst\\b/i', $opts)) {
+ $out = $parser->preSaveTransform( $input, $title, $user, $options );
+ } elseif (preg_match('/\\bmsg\\b/i', $opts)) {
+ $out = $parser->transformMsg( $input, $options );
+ } elseif( preg_match( '/\\bsection=(\d+)\b/i', $opts, $matches ) ) {
+ $section = intval( $matches[1] );
+ $out = $parser->getSection( $input, $section );
+ } elseif( preg_match( '/\\breplace=(\d+),"(.*?)"/i', $opts, $matches ) ) {
+ $section = intval( $matches[1] );
+ $replace = $matches[2];
+ $out = $parser->replaceSection( $input, $section, $replace );
+ } else {
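+			# Full parse; the trailing 1337 presumably serves as a dummy revision id.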
+ $output = $parser->parse( $input, $title, $options, true, true, 1337 );
+ $out = $output->getText();
+
+ if (preg_match('/\\bill\\b/i', $opts)) {
+ $out = $this->tidy( implode( ' ', $output->getLanguageLinks() ) );
+ } else if (preg_match('/\\bcat\\b/i', $opts)) {
+ global $wgOut;
+ $wgOut->addCategoryLinks($output->getCategories());
+ $out = $this->tidy ( implode( ' ', $wgOut->getCategoryLinks() ) );
+ }
+
+ $result = $this->tidy($result);
+ }
+
+ $this->teardownGlobals();
+
+ if( $result === $out && ( $noxml === true || $this->wellFormed( $out ) ) ) {
+ return $this->showSuccess( $desc );
+ } else {
+ return $this->showFailure( $desc, $result, $out );
+ }
+ }
+
+ /**
+ * Set up the global variables for a consistent environment for each test.
+ * Ideally this should replace the global configuration entirely.
+ *
+ * @private
+ */
+ function setupGlobals($opts = '') {
+ # Save the prefixed / quoted table names for later use when we make the temporaries.
+ $db =& wfGetDB( DB_READ );
+ $this->oldTableNames = array();
+ foreach( $this->listTables() as $table ) {
+ $this->oldTableNames[$table] = $db->tableName( $table );
+ }
+ if( !isset( $this->uploadDir ) ) {
+ $this->uploadDir = $this->setupUploadDir();
+ }
+
+ if( preg_match( '/language=([a-z]+(?:_[a-z]+)?)/', $opts, $m ) ) {
+ $lang = $m[1];
+ } else {
+ $lang = 'en';
+ }
+
+ $settings = array(
+ 'wgServer' => 'http://localhost',
+ 'wgScript' => '/index.php',
+ 'wgScriptPath' => '/',
+ 'wgArticlePath' => '/wiki/$1',
+ 'wgActionPaths' => array(),
+ 'wgUploadPath' => 'http://example.com/images',
+ 'wgUploadDirectory' => $this->uploadDir,
+ 'wgStyleSheetPath' => '/skins',
+ 'wgSitename' => 'MediaWiki',
+ 'wgServerName' => 'Britney Spears',
+ 'wgLanguageCode' => $lang,
+ 'wgContLanguageCode' => $lang,
+ 'wgDBprefix' => 'parsertest_',
+ 'wgDefaultUserOptions' => array(),
+
+ 'wgLang' => null,
+ 'wgContLang' => null,
+ 'wgNamespacesWithSubpages' => array( 0 => preg_match('/\\bsubpage\\b/i', $opts)),
+ 'wgMaxTocLevel' => 999,
+ 'wgCapitalLinks' => true,
+ 'wgNoFollowLinks' => true,
+ 'wgThumbnailScriptPath' => false,
+ 'wgUseTeX' => false,
+ 'wgLocaltimezone' => 'UTC',
+ 'wgAllowExternalImages' => true,
+ );
+ $this->savedGlobals = array();
+ foreach( $settings as $var => $val ) {
+ $this->savedGlobals[$var] = $GLOBALS[$var];
+ $GLOBALS[$var] = $val;
+ }
+ $langClass = 'Language' . str_replace( '-', '_', ucfirst( $lang ) );
+ $langObj = setupLangObj( $langClass );
+ $GLOBALS['wgLang'] = $langObj;
+ $GLOBALS['wgContLang'] = $langObj;
+
+ $GLOBALS['wgLoadBalancer']->loadMasterPos();
+ $GLOBALS['wgMessageCache'] = new MessageCache( new BagOStuff(), false, 0, $GLOBALS['wgDBname'] );
+ $this->setupDatabase();
+
+ global $wgUser;
+ $wgUser = new User();
+ }
+
+ # List of temporary tables to create, without prefix
+ # Some of these probably aren't necessary
+ function listTables() {
+ $tables = array('user', 'page', 'revision', 'text',
+ 'pagelinks', 'imagelinks', 'categorylinks',
+ 'templatelinks', 'externallinks', 'langlinks',
+ 'site_stats', 'hitcounter',
+ 'ipblocks', 'image', 'oldimage',
+ 'recentchanges',
+ 'watchlist', 'math', 'searchindex',
+ 'interwiki', 'querycache',
+ 'objectcache', 'job'
+ );
+
+ // FIXME manually adding additional table for the tasks extension
+ // we probably need a better software wide system to register new
+ // tables.
+ global $wgExtensionFunctions;
+ if( in_array('wfTasksExtension' , $wgExtensionFunctions ) ) {
+ $tables[] = 'tasks';
+ }
+
+ return $tables;
+ }
+
+ /**
+ * Set up a temporary set of wiki tables to work with for the tests.
+ * Currently this will only be done once per run, and any changes to
+ * the db will be visible to later tests in the run.
+ *
+ * @private
+ */
+ function setupDatabase() {
+ static $setupDB = false;
+ global $wgDBprefix;
+
+ # Make sure we don't mess with the live DB
+ if (!$setupDB && $wgDBprefix === 'parsertest_') {
+ # oh teh horror
+ $GLOBALS['wgLoadBalancer'] = LoadBalancer::newFromParams( $GLOBALS['wgDBservers'] );
+ $db =& wfGetDB( DB_MASTER );
+
+ $tables = $this->listTables();
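+			# wgDBprefix is now 'parsertest_', so tableName() yields fresh names;
+			# each temporary table is cloned from the unprefixed original saved
+			# earlier in $this->oldTableNames.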
+
+ if (!(strcmp($db->getServerVersion(), '4.1') < 0 and stristr($db->getSoftwareLink(), 'MySQL'))) {
+ # Database that supports CREATE TABLE ... LIKE
+ global $wgDBtype;
+ if( $wgDBtype == 'postgres' ) {
+ $def = 'INCLUDING DEFAULTS';
+ } else {
+ $def = '';
+ }
+ foreach ($tables as $tbl) {
+ $newTableName = $db->tableName( $tbl );
+ $tableName = $this->oldTableNames[$tbl];
+ $db->query("CREATE TEMPORARY TABLE $newTableName (LIKE $tableName $def)");
+ }
+ } else {
+ # Hack for MySQL versions < 4.1, which don't support
+ # "CREATE TABLE ... LIKE". Note that
+ # "CREATE TEMPORARY TABLE ... SELECT * FROM ... LIMIT 0"
+ # would not create the indexes we need....
+ foreach ($tables as $tbl) {
+ $res = $db->query("SHOW CREATE TABLE {$this->oldTableNames[$tbl]}");
+ $row = $db->fetchRow($res);
+ $create = $row[1];
+ $create_tmp = preg_replace('/CREATE TABLE `(.*?)`/', 'CREATE TEMPORARY TABLE `'
+ . $wgDBprefix . $tbl .'`', $create);
+ if ($create === $create_tmp) {
+ # Couldn't do replacement
+ wfDie("could not create temporary table $tbl");
+ }
+ $db->query($create_tmp);
+ }
+
+ }
+
+ # Hack: insert a few Wikipedia in-project interwiki prefixes,
+ # for testing inter-language links
+ $db->insert( 'interwiki', array(
+ array( 'iw_prefix' => 'Wikipedia',
+ 'iw_url' => 'http://en.wikipedia.org/wiki/$1',
+ 'iw_local' => 0 ),
+ array( 'iw_prefix' => 'MeatBall',
+ 'iw_url' => 'http://www.usemod.com/cgi-bin/mb.pl?$1',
+ 'iw_local' => 0 ),
+ array( 'iw_prefix' => 'zh',
+ 'iw_url' => 'http://zh.wikipedia.org/wiki/$1',
+ 'iw_local' => 1 ),
+ array( 'iw_prefix' => 'es',
+ 'iw_url' => 'http://es.wikipedia.org/wiki/$1',
+ 'iw_local' => 1 ),
+ array( 'iw_prefix' => 'fr',
+ 'iw_url' => 'http://fr.wikipedia.org/wiki/$1',
+ 'iw_local' => 1 ),
+ array( 'iw_prefix' => 'ru',
+ 'iw_url' => 'http://ru.wikipedia.org/wiki/$1',
+ 'iw_local' => 1 ),
+ ) );
+
+ # Hack: Insert an image to work with
+ $db->insert( 'image', array(
+ 'img_name' => 'Foobar.jpg',
+ 'img_size' => 12345,
+ 'img_description' => 'Some lame file',
+ 'img_user' => 1,
+ 'img_user_text' => 'WikiSysop',
+ 'img_timestamp' => $db->timestamp( '20010115123500' ),
+ 'img_width' => 1941,
+ 'img_height' => 220,
+ 'img_bits' => 24,
+ 'img_media_type' => MEDIATYPE_BITMAP,
+ 'img_major_mime' => "image",
+ 'img_minor_mime' => "jpeg",
+ ) );
+
+ # Update certain things in site_stats
+ $db->insert( 'site_stats', array( 'ss_row_id' => 1, 'ss_images' => 1, 'ss_good_articles' => 1 ) );
+
+ $setupDB = true;
+ }
+ }
+
+ /**
+ * Create a dummy uploads directory which will contain a couple
+ * of files in order to pass existence tests.
+ * @return string The directory
+ * @private
+ */
+ function setupUploadDir() {
+ global $IP;
+
+ $dir = wfTempDir() . "/mwParser-" . mt_rand() . "-images";
+ mkdir( $dir );
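+		# '3/3a' mirrors MediaWiki's hashed upload layout (leading hex digits
+		# of the md5 of the filename).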
+ mkdir( $dir . '/3' );
+ mkdir( $dir . '/3/3a' );
+
+ $img = "$IP/skins/monobook/headbg.jpg";
+ $h = fopen($img, 'r');
+ $c = fread($h, filesize($img));
+ fclose($h);
+
+ $f = fopen( $dir . '/3/3a/Foobar.jpg', 'wb' );
+ fwrite( $f, $c );
+ fclose( $f );
+ return $dir;
+ }
+
+ /**
+ * Restore default values and perform any necessary clean-up
+ * after each test runs.
+ *
+ * @private
+ */
+ function teardownGlobals() {
+ foreach( $this->savedGlobals as $var => $val ) {
+ $GLOBALS[$var] = $val;
+ }
+ if( isset( $this->uploadDir ) ) {
+ $this->teardownUploadDir( $this->uploadDir );
+ unset( $this->uploadDir );
+ }
+ }
+
+ /**
+ * Remove the dummy uploads directory
+ * @private
+ */
+ function teardownUploadDir( $dir ) {
+ unlink( "$dir/3/3a/Foobar.jpg" );
+ rmdir( "$dir/3/3a" );
+ rmdir( "$dir/3" );
+ @rmdir( "$dir/thumb/6/65" );
+ @rmdir( "$dir/thumb/6" );
+
+ @unlink( "$dir/thumb/3/3a/Foobar.jpg/180px-Foobar.jpg" );
+ @rmdir( "$dir/thumb/3/3a/Foobar.jpg" );
+ @rmdir( "$dir/thumb/3/3a" );
+ @rmdir( "$dir/thumb/3/39" ); # wtf?
+ @rmdir( "$dir/thumb/3" );
+ @rmdir( "$dir/thumb" );
+ @rmdir( "$dir" );
+ }
+
+ /**
+ * "Running test $desc..."
+ * @private
+ */
+ function showTesting( $desc ) {
+ print "Running test $desc... ";
+ }
+
+ /**
+ * Print a happy success message.
+ *
+ * @param string $desc The test name
+ * @return bool
+ * @private
+ */
+ function showSuccess( $desc ) {
+ if( !$this->quiet ) {