summaryrefslogtreecommitdiff
path: root/libre/iceweasel
diff options
context:
space:
mode:
authoroaken-source <oaken-source@parabola.nu>2019-11-22 06:49:26 +0100
committeroaken-source <oaken-source@parabola.nu>2019-11-22 06:49:42 +0100
commitadc69f350550ff3d4bbad8a2a8a2e1414b479a74 (patch)
treed68290bfb62a53d4bbefced9615d870cfb9d253f /libre/iceweasel
parent5ae44c2cbc2077b3bee030c41038aa0a0ef99224 (diff)
libre/iceweasel: updated to 70.0.1
Diffstat (limited to 'libre/iceweasel')
-rw-r--r--libre/iceweasel/0001-Use-remoting-name-for-GDK-application-names.patch56
-rw-r--r--libre/iceweasel/PKGBUILD212
-rw-r--r--libre/iceweasel/firefox-70.0-add-distro.patch6965
-rw-r--r--libre/iceweasel/firefox-70.0-update-bindgen.patch23946
-rw-r--r--libre/iceweasel/libre-searchengines.patch (renamed from libre/iceweasel/searchengines.patch)0
-rw-r--r--libre/iceweasel/no-relinking.patch41
6 files changed, 31124 insertions, 96 deletions
diff --git a/libre/iceweasel/0001-Use-remoting-name-for-GDK-application-names.patch b/libre/iceweasel/0001-Use-remoting-name-for-GDK-application-names.patch
new file mode 100644
index 000000000..8cc378636
--- /dev/null
+++ b/libre/iceweasel/0001-Use-remoting-name-for-GDK-application-names.patch
@@ -0,0 +1,56 @@
+From 1cab10b39cfda068100cab8c598f1ca0f50e4131 Mon Sep 17 00:00:00 2001
+Message-Id: <1cab10b39cfda068100cab8c598f1ca0f50e4131.1553597287.git.jan.steffens@gmail.com>
+From: "Jan Alexander Steffens (heftig)" <jan.steffens@gmail.com>
+Date: Mon, 25 Mar 2019 20:30:11 +0100
+Subject: [PATCH] Use remoting name for GDK application names
+
+---
+ toolkit/xre/nsAppRunner.cpp | 6 +-----
+ widget/gtk/nsAppShell.cpp | 7 +++----
+ 2 files changed, 4 insertions(+), 9 deletions(-)
+
+diff --git a/toolkit/xre/nsAppRunner.cpp b/toolkit/xre/nsAppRunner.cpp
+index 0b808ef215f06..28911c0ffeaec 100644
+--- a/toolkit/xre/nsAppRunner.cpp
++++ b/toolkit/xre/nsAppRunner.cpp
+@@ -3728,11 +3728,7 @@ int XREMain::XRE_mainStartup(bool* aExitFlag) {
+ // consistently.
+
+ // Set program name to the one defined in application.ini.
+- {
+- nsAutoCString program(gAppData->name);
+- ToLowerCase(program);
+- g_set_prgname(program.get());
+- }
++ g_set_prgname(gAppData->remotingName);
+
+ // Initialize GTK here for splash.
+
+diff --git a/widget/gtk/nsAppShell.cpp b/widget/gtk/nsAppShell.cpp
+index 0686ff814916b..ae4a4ea55f420 100644
+--- a/widget/gtk/nsAppShell.cpp
++++ b/widget/gtk/nsAppShell.cpp
+@@ -24,6 +24,7 @@
+ # include "WakeLockListener.h"
+ #endif
+ #include "gfxPlatform.h"
++#include "nsAppRunner.h"
+ #include "ScreenHelperGTK.h"
+ #include "HeadlessScreenHelper.h"
+ #include "mozilla/widget/ScreenManager.h"
+@@ -171,10 +172,8 @@ nsresult nsAppShell::Init() {
+ // option when program uses gdk_set_program_class().
+ //
+ // See https://bugzilla.gnome.org/show_bug.cgi?id=747634
+- nsAutoString brandName;
+- mozilla::widget::WidgetUtils::GetBrandShortName(brandName);
+- if (!brandName.IsEmpty()) {
+- gdk_set_program_class(NS_ConvertUTF16toUTF8(brandName).get());
++ if (gAppData) {
++ gdk_set_program_class(gAppData->remotingName);
+ }
+ }
+
+--
+2.21.0
+
diff --git a/libre/iceweasel/PKGBUILD b/libre/iceweasel/PKGBUILD
index fecf3d823..88c99a7ba 100644
--- a/libre/iceweasel/PKGBUILD
+++ b/libre/iceweasel/PKGBUILD
@@ -33,7 +33,6 @@
# - Remove Google API keys and usage
# - Disable Mozilla telemetry and crash reporting (good manners
# because of all of the other patching we're doing
-# - keep using a bunch of system libs
#
# [1]: https://www.mozilla.org/en-US/foundation/trademarks/policy/
@@ -41,18 +40,18 @@
pkgname=iceweasel
replaces=('firefox')
epoch=1
-pkgver=69.0
+pkgver=70.0.1
pkgrel=1
pkgrel+=.parabola1
-_brandingver=69.0
+_brandingver=70.0
_brandingrel=1
-pkgdesc="Libre standalone web browser based on Mozilla Firefox"
+pkgdesc="Standalone web browser based on Mozilla Firefox"
arch=(x86_64)
arch+=(i686 armv7h)
license=(MPL GPL LGPL)
url="https://wiki.parabola.nu/$pkgname"
-depends=(gtk3 mozilla-common libxt startup-notification mime-types dbus-glib
- ffmpeg nss ttf-font libpulse)
+depends=(gtk3 libxt startup-notification mime-types dbus-glib ffmpeg nss
+ ttf-font libpulse)
depends+=(sqlite icu)
makedepends=(unzip zip diffutils python2-setuptools yasm mesa imake inetutils
xorg-server-xvfb autoconf2.13 rust clang llvm jack gtk2
@@ -64,37 +63,47 @@ optdepends=('networkmanager: Location detection via available WiFi networks'
'pulseaudio: Audio support'
'speech-dispatcher: Text-to-Speech'
'hunspell-en_US: Spell checking, American English')
-options=(!emptydirs !makeflags !debug)
+options=(!emptydirs !makeflags !strip)
source=(https://archive.mozilla.org/pub/firefox/releases/$pkgver/source/firefox-$pkgver.source.tar.xz{,.asc}
+ no-relinking.patch
0001-Use-remoting-name-for-GDK-application-names.patch
- $pkgname.desktop
- https://repo.parabola.nu/other/iceweasel/${pkgname}_${_brandingver}-${_brandingrel}.branding.tar.xz{,.sig}
- arm.patch
- build-arm-libopus.patch
- libre.patch
- searchengines.patch)
-sha256sums=('413c3febdfeb69eade818824eecbdb11eaeda71de229573810afd641ba741ec5'
+ $pkgname.desktop)
+source+=(https://repo.parabola.nu/other/iceweasel/${pkgname}_${_brandingver}-${_brandingrel}.branding.tar.xz{,.sig}
+ firefox-70.0-update-bindgen.patch
+ firefox-70.0-add-distro.patch
+ libre.patch
+ libre-searchengines.patch)
+sha256sums=('f2e9bb26af7682b31e82fdfc3a4b3e04fd1caa8b004469ea608185d33e35691b'
'SKIP'
+ '2dc9d1aa5eb7798c89f46478f254ae61e4122b4d1956d6044426288627d8a014'
'ab07ab26617ff76fce68e07c66b8aa9b96c2d3e5b5517e51a3c3eac2edd88894'
'e439117380218898cd5c60a8da8947847efbd0fe64cc06b129d6ca20a392cb3d'
- 'a6a3771416a51c17e22461abef38b3032c7dc991d223bc374d5bb4d3bc93e9e2'
+ 'e90956067d8549f02864d612369ed8714d7173f0ee07bbfb4374a7323b4a733e'
'SKIP'
- '2bb1f6b9b66a00f4f98eb5cdb2f25972018efe042153503cbc8c494780d7225e'
- '2d4d91f7e35d0860225084e37ec320ca6cae669f6c9c8fe7735cdbd542e3a7c9'
+ 'bde80a158f42f6afd01d9f3ce1ff9c8ecb89db96b6b7398f2acfb03436234eeb'
+ '41ab298519dfdf86e1599990cd2a2e4d939a2a1d77de829caa0593d0d2e8f0e8'
'783e1b7c2f6f8719cc8a0751aacc823a5029c2326ac0b5ff7c659d831e286387'
'dfed11d97f1d4198a3dc608be159b6b53a11054f376cdb73bb0fe9a487ae9418')
-validpgpkeys=('14F26682D0916CDD81E37B6D61B7B526D98F0353' # Mozilla Software Releases
- '3954A7AB837D0EA9CFA9798925DB7D9B5A8D4B40') # bill-auger
+validpgpkeys=('14F26682D0916CDD81E37B6D61B7B526D98F0353') # Mozilla Software Releases <release@mozilla.com>
+validpgpkeys+=('BFA8008A8265677063B11BF47171986E4B745536') # Andreas Grapentin
prepare() {
- [[ -d mozbuild ]] && rm -rf mozbuild
+ mkdir mozbuild
cd firefox-$pkgver
+ # Avoid relinking during buildsymbols
+ patch -Np1 -i ../no-relinking.patch
+
# https://bugzilla.mozilla.org/show_bug.cgi?id=1530052
patch -Np1 -i ../0001-Use-remoting-name-for-GDK-application-names.patch
- ## build configuration
- cat >.mozconfig <<END
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1583471
+ patch -Np1 -i ../firefox-70.0-update-bindgen.patch
+
+ # https://bugzilla.mozilla.org/show_bug.cgi?id=1212502
+ patch -Np1 -i ../firefox-70.0-add-distro.patch
+
+ cat >../mozconfig <<END
ac_add_options --enable-application=browser
ac_add_options --prefix=/usr
@@ -102,10 +111,8 @@ ac_add_options --enable-release
ac_add_options --enable-hardening
ac_add_options --enable-optimize
ac_add_options --enable-rust-simd
-ac_add_options --enable-lto
-export MOZ_PGO=1
-export CC=clang
-export CXX=clang++
+export CC='clang --target=x86_64-unknown-linux-gnu'
+export CXX='clang++ --target=x86_64-unknown-linux-gnu'
export AR=llvm-ar
export NM=llvm-nm
export RANLIB=llvm-ranlib
@@ -115,21 +122,14 @@ ac_add_options --disable-official-branding
ac_add_options --with-branding=browser/branding/iceweasel
ac_add_options --enable-update-channel=release
ac_add_options --with-distribution-id=nu.parabola
+ac_add_options --with-unsigned-addon-scopes=app,system
export MOZ_APP_REMOTINGNAME=${pkgname//-/}
export MOZ_TELEMETRY_REPORTING=
export MOZ_REQUIRE_SIGNING=
# System libraries
-ac_add_options --enable-system-ffi
-ac_add_options --enable-system-sqlite
-ac_add_options --with-system-bz2
-ac_add_options --with-system-icu
-ac_add_options --with-system-jpeg
-# FIXME: does not build against libvpx-1.8.0 or 1.8.1 - disable --with-system-libvpx
-# ac_add_options --with-system-libvpx
ac_add_options --with-system-nspr
ac_add_options --with-system-nss
-ac_add_options --with-system-zlib
# Features
ac_add_options --enable-alsa
@@ -139,50 +139,16 @@ ac_add_options --disable-crashreporter
ac_add_options --disable-gconf
ac_add_options --disable-updater
ac_add_options --disable-tests
-ac_add_options --disable-eme
END
if [[ $CARCH = arm* ]]; then
- sed -i '/--enable-hardening/d' .mozconfig
- sed -i '/--enable-optimize/d' .mozconfig
- sed -i '/--enable-rust-simd/d' .mozconfig
- sed -i '/--enable-lto/d' .mozconfig
- sed -i '/export MOZ_PGO/d' .mozconfig
- sed -i '/--disable-eme/d' .mozconfig
-
- patch -Np1 -i ../arm.patch
- patch -Np1 -i ../build-arm-libopus.patch
-
- cat >> .mozconfig <<END
-#ac_add_options --enable-linker=gold
-ac_add_options --enable-optimize="-O2"
-ac_add_options --disable-rust-simd
-ac_add_options --disable-lto
-ac_add_options --disable-webrtc
-ac_add_options --disable-elf-hack
-ac_add_options --disable-av1
-# mk_add_options MOZ_MAKE_FLAGS="$MAKEFLAGS"
-export CC=gcc
-export CXX=g++
-export AR=gcc-ar
-export NM=gcc-nm
-export RANLIB=gcc-ranlib
-END
+ # TODO: fixme
+ false
fi
if [[ $CARCH = i686 ]]; then
- sed -i '/--enable-rust-simd/d' .mozconfig
- sed -i '/--enable-lto/d' .mozconfig
- sed -i '/export MOZ_PGO/d' .mozconfig
-
- cat >> .mozconfig <<END
-ac_add_options --disable-lto
-export CC=gcc
-export CXX=g++
-export AR=gcc-ar
-export NM=gcc-nm
-export RANLIB=gcc-ranlib
-END
+ # TODO: fixme
+ false
fi
## Rebranding
@@ -244,7 +210,7 @@ END
# local _remove_engines_sed='s|.*oogle.*| "ddg", "duckduckgo-html", "duckduckgo-lite", "internet-archive", "parabola-labs", "parabola-packages", "parabola-wiki-en", "searx", "wikipedia", "yacy"|g'
local _remove_engines_sed='s|.*oogle.*| "ddg", "wikipedia"|g'
local _search_config_file=browser/components/search/extensions/list.json
- patch -Np1 -i "$srcdir/searchengines.patch"
+ patch -Np1 -i "$srcdir/libre-searchengines.patch"
sed -i "${_remove_engines_sed}" ${_search_config_file}
# Sanity-check search-engines patching
@@ -260,26 +226,60 @@ build() {
export MOZBUILD_STATE_PATH="$srcdir/mozbuild"
if [[ $CARCH = armv7h ]]; then
- # FIXME: wip ARM FTBS (memory exhausted)
-# export RUSTFLAGS+=" -Cdebuginfo=0"
- export RUSTFLAGS+=" -Cdebuginfo=0 -Clto=off -Copt-level=0"
-# export LDFLAGS+=" -Wl,--no-keep-memory -Wl,--reduce-memory-overheads"
- export LDFLAGS+="-Wl,--no-keep-memory -Wl,--no-keep-files-mapped -Wl,--no-map-whole-files -Wl,--no-mmap-output-file"
+ # TODO: fixme
+ false
fi
if [[ $CARCH = i686 ]]; then
- export RUSTFLAGS+=" -Cdebuginfo=0 -Clto=off"
- export LDFLAGS+=" -Wl,--no-keep-memory -Wl,--reduce-memory-overheads"
+ # TODO: fixme
+ false
fi
# LTO needs more open files
ulimit -n 4096
- if [[ $CARCH = x86_64 ]]; then
- xvfb-run -a -n 97 -s "-screen 0 1600x1200x24" ./mach build
- else
- ./mach build
+ # -fno-plt with cross-LTO causes obscure LLVM errors
+ # LLVM ERROR: Function Import: link error
+ CFLAGS="${CFLAGS/-fno-plt/}"
+ CXXFLAGS="${CXXFLAGS/-fno-plt/}"
+
+ # Do 3-tier PGO
+ msg2 "Building instrumented browser..."
+ cat >.mozconfig ../mozconfig - <<END
+ac_add_options --enable-profile-generate=cross
+END
+ ./mach build
+
+ msg2 "Profiling instrumented browser..."
+ ./mach package
+ LLVM_PROFDATA=llvm-profdata \
+ JARLOG_FILE="$PWD/jarlog" \
+ xvfb-run -a -n 92 -s "-screen 0 1600x1200x24" \
+ ./mach python build/pgo/profileserver.py
+
+ if ! compgen -G '*.profraw' >&2; then
+ error "No profile data produced."
+ return 1
fi
+
+ if [[ ! -s jarlog ]]; then
+ error "No jar log produced."
+ return 1
+ fi
+
+ msg2 "Removing instrumented browser..."
+ ./mach clobber
+
+ msg2 "Building optimized browser..."
+ cat >.mozconfig ../mozconfig - <<END
+ac_add_options --enable-lto=cross
+ac_add_options --enable-profile-use=cross
+ac_add_options --with-pgo-profile-path=${PWD@Q}
+ac_add_options --with-pgo-jarlog=${PWD@Q}/jarlog
+END
+ ./mach build
+
+ msg2 "Building symbol archive..."
./mach buildsymbols
}
@@ -292,9 +292,8 @@ package() {
DESTDIR="$pkgdir" ./mach install
find . -name '*crashreporter-symbols-full.zip' -exec cp -fvt "$startdir" {} +
- ## runtime configuration
- _vendorjs="$pkgdir/usr/lib/$pkgname/browser/defaults/preferences/vendor.js"
- install -Dm644 /dev/stdin "$_vendorjs" <<END
+ local vendorjs="$pkgdir/usr/lib/$pkgname/browser/defaults/preferences/vendor.js"
+ install -Dvm644 /dev/stdin "$vendorjs" <<END
// Use LANG environment variable to choose locale
pref("intl.locale.requested", "");
@@ -319,7 +318,7 @@ END
#
# However, they don't seem to be causing any of the critical issues.
local _shortver=$(cut -d. -f1,2 <<<"$pkgver")
- cat >> "$_vendorjs" <<END
+ cat >> "$vendorjs" <<END
// Disable "alt" as a shortcut key to open full menu bar. Conflicts with "alt" as a modifier
pref("ui.key.menuAccessKeyFocuses", false);
@@ -616,10 +615,15 @@ pref("dom.input.dirpicker", false);
// fix alsa sound sandbox issue for iceweasel-58
// https://labs.parabola.nu/issues/1628
pref("security.sandbox.content.syscall_whitelist", "16");
+
+// Disable recommendations of extensions and themes on about:addons page
+// https://labs.parabola.nu/issues/2409
+pref("extensions.htmlaboutaddons.discover.enabled", false);
+pref("extensions.htmlaboutaddons.recommendations.enabled", false);
END
- _distini="$pkgdir/usr/lib/$pkgname/distribution/distribution.ini"
- install -Dm644 /dev/stdin "$_distini" <<END
+ local distini="$pkgdir/usr/lib/$pkgname/distribution/distribution.ini"
+ install -Dvm644 /dev/stdin "$distini" <<END
[Global]
id=parabola
version=1.0
@@ -631,24 +635,40 @@ app.distributor.channel=$pkgname
app.partner.parabola=parabola
END
+ local i
for i in 16 22 24 32 48 64 128 192 256 384; do
- install -Dm644 browser/branding/$pkgname/default$i.png \
+ install -Dvm644 browser/branding/$pkgname/default$i.png \
"$pkgdir/usr/share/icons/hicolor/${i}x${i}/apps/$pkgname.png"
done
- install -Dm644 "browser/branding/$pkgname/${pkgname}_icon.svg" \
+ install -Dvm644 "browser/branding/$pkgname/${pkgname}_icon.svg" \
"$pkgdir/usr/share/icons/hicolor/scalable/apps/$pkgname.svg"
- install -Dm644 ../$pkgname.desktop \
+ install -Dvm644 ../$pkgname.desktop \
"$pkgdir/usr/share/applications/$pkgname.desktop"
# Install a wrapper to avoid confusion about binary path
- install -Dm755 /dev/stdin "$pkgdir/usr/bin/$pkgname" <<END
+ install -Dvm755 /dev/stdin "$pkgdir/usr/bin/$pkgname" <<END
#!/bin/sh
exec /usr/lib/$pkgname/$pkgname "\$@"
END
# Replace duplicate binary with wrapper
# https://bugzilla.mozilla.org/show_bug.cgi?id=658850
- ln -srf "$pkgdir/usr/bin/$pkgname" \
- "$pkgdir/usr/lib/$pkgname/$pkgname-bin"
+ ln -srfv "$pkgdir/usr/bin/$pkgname" "$pkgdir/usr/lib/$pkgname/$pkgname-bin"
+
+ # Use system certificates
+ local nssckbi="$pkgdir/usr/lib/$pkgname/libnssckbi.so"
+ if [[ -e $nssckbi ]]; then
+ ln -srfv "$pkgdir/usr/lib/libnssckbi.so" "$nssckbi"
+ fi
+
+ if [[ -f "$startdir/.crash-stats-api.token" ]]; then
+ find . -name '*crashreporter-symbols-full.zip' -exec \
+ "$startdir/upload-symbol-archive" "$startdir/.crash-stats-api.token" {} +
+ else
+ find . -name '*crashreporter-symbols-full.zip' -exec \
+ cp -fvt "$startdir" {} +
+ fi
}
+
+# vim:set sw=2 et:
diff --git a/libre/iceweasel/firefox-70.0-add-distro.patch b/libre/iceweasel/firefox-70.0-add-distro.patch
new file mode 100644
index 000000000..8db8ce5dc
--- /dev/null
+++ b/libre/iceweasel/firefox-70.0-add-distro.patch
@@ -0,0 +1,6965 @@
+
+# HG changeset patch
+# User Edwin Takahashi <egao@mozilla.com>
+# Date 1572468170 0
+# Node ID d2d9fe01fc33af4538940e833a6696d973ebea74
+# Parent eddb9fcaaa4bd4fdb4e32024f92f969abfe92f58
+Bug 1212502 - Switch mozinfo to using the 'distro' package to get linux distribution info r=ahal,KWierso
+
+Differential Revision: https://phabricator.services.mozilla.com/D49366
+
+diff --git a/build/virtualenv_packages.txt b/build/virtualenv_packages.txt
+--- a/build/virtualenv_packages.txt
++++ b/build/virtualenv_packages.txt
+@@ -10,16 +10,17 @@ mozilla.pth:third_party/python/atomicwri
+ mozilla.pth:third_party/python/attrs/src
+ python2:mozilla.pth:third_party/python/backports
+ mozilla.pth:third_party/python/biplist
+ mozilla.pth:third_party/python/blessings
+ mozilla.pth:third_party/python/Click
+ mozilla.pth:third_party/python/compare-locales
+ mozilla.pth:third_party/python/configobj
+ mozilla.pth:third_party/python/cram
++mozilla.pth:third_party/python/distro
+ mozilla.pth:third_party/python/dlmanager
+ mozilla.pth:third_party/python/enum34
+ mozilla.pth:third_party/python/fluent
+ mozilla.pth:third_party/python/funcsigs
+ python2:mozilla.pth:third_party/python/futures
+ mozilla.pth:third_party/python/mohawk
+ mozilla.pth:third_party/python/more-itertools
+ mozilla.pth:third_party/python/mozilla-version
+diff --git a/python/mozbuild/mozbuild/action/test_archive.py b/python/mozbuild/mozbuild/action/test_archive.py
+--- a/python/mozbuild/mozbuild/action/test_archive.py
++++ b/python/mozbuild/mozbuild/action/test_archive.py
+@@ -245,16 +245,22 @@ ARCHIVE_FILES = {
+ },
+ {
+ 'source': buildconfig.topsrcdir,
+ 'base': 'third_party/python/six',
+ 'pattern': '**',
+ 'dest': 'tools/six',
+ },
+ {
++ 'source': buildconfig.topsrcdir,
++ 'base': 'third_party/python/distro',
++ 'pattern': '**',
++ 'dest': 'tools/distro',
++ },
++ {
+ 'source': buildconfig.topobjdir,
+ 'base': '',
+ 'pattern': 'mozinfo.json',
+ },
+ {
+ 'source': buildconfig.topobjdir,
+ 'base': 'dist/bin',
+ 'patterns': [
+@@ -431,16 +437,22 @@ ARCHIVE_FILES = {
+ 'dest': 'mozharness',
+ },
+ {
+ 'source': buildconfig.topsrcdir,
+ 'base': 'third_party/python/six',
+ 'pattern': 'six.py',
+ 'dest': 'mozharness',
+ },
++ {
++ 'source': buildconfig.topsrcdir,
++ 'base': 'third_party/python/distro',
++ 'pattern': 'distro.py',
++ 'dest': 'mozharness',
++ },
+ ],
+ 'reftest': [
+ {
+ 'source': buildconfig.topobjdir,
+ 'base': '_tests',
+ 'pattern': 'reftest/**',
+ },
+ {
+diff --git a/testing/mozbase/mozinfo/mozinfo/mozinfo.py b/testing/mozbase/mozinfo/mozinfo/mozinfo.py
+--- a/testing/mozbase/mozinfo/mozinfo/mozinfo.py
++++ b/testing/mozbase/mozinfo/mozinfo/mozinfo.py
+@@ -9,16 +9,17 @@
+ # information and having behaviour depend on it
+
+ from __future__ import absolute_import, print_function
+
+ import os
+ import platform
+ import re
+ import sys
++
+ from .string_version import StringVersion
+ from ctypes.util import find_library
+
+ # keep a copy of the os module since updating globals overrides this
+ _os = os
+
+
+ class unknown(object):
+@@ -93,35 +94,45 @@ if system in ["Microsoft", "Windows"]:
+ version = "%d.%d.%d" % (major, minor, build_number)
+
+ os_version = "%d.%d" % (major, minor)
+ elif system.startswith(('MINGW', 'MSYS_NT')):
+ # windows/mingw python build (msys)
+ info['os'] = 'win'
+ os_version = version = unknown
+ elif system == "Linux":
+- if hasattr(platform, "linux_distribution"):
+- (distro, os_version, codename) = platform.linux_distribution()
++ # Only attempt to import distro for Linux.
++ # https://github.com/nir0s/distro/issues/177
++ try:
++ import distro
++ except ImportError:
++ pass
++ # First use distro package, then fall back to platform.
++ # This will only until Mozilla upgrades python to 3.8.
++ if hasattr(distro, "linux_distribution"):
++ (distribution, os_version, codename) = distro.linux_distribution()
++ elif hasattr(platform, "linux_distribution"):
++ (distribution, os_version, codename) = platform.linux_distribution()
+ else:
+- (distro, os_version, codename) = platform.dist()
++ (distribution, os_version, codename) = platform.dist()
+ if not processor:
+ processor = machine
+- version = "%s %s" % (distro, os_version)
++ version = "%s %s" % (distribution, os_version)
+
+ # Bug in Python 2's `platform` library:
+ # It will return a triple of empty strings if the distribution is not supported.
+ # It works on Python 3. If we don't have an OS version,
+ # the unit tests fail to run.
+- if not distro and not os_version and not codename:
+- distro = 'lfs'
++ if not distribution and not os_version and not codename:
++ distribution = 'lfs'
+ version = release
+ os_version = release
+
+ info['os'] = 'linux'
+- info['linux_distro'] = distro
++ info['linux_distro'] = distribution
+ elif system in ['DragonFly', 'FreeBSD', 'NetBSD', 'OpenBSD']:
+ info['os'] = 'bsd'
+ version = os_version = sys.platform
+ elif system == "Darwin":
+ (release, versioninfo, machine) = platform.mac_ver()
+ version = "OS X %s" % release
+ versionNums = release.split('.')[:2]
+ os_version = "%s.%s" % (versionNums[0], versionNums[1])
+diff --git a/testing/mozbase/mozinfo/setup.py b/testing/mozbase/mozinfo/setup.py
+--- a/testing/mozbase/mozinfo/setup.py
++++ b/testing/mozbase/mozinfo/setup.py
+@@ -4,17 +4,20 @@
+
+ from __future__ import absolute_import
+
+ from setuptools import setup
+
+ PACKAGE_VERSION = "1.1.0"
+
+ # dependencies
+-deps = ["mozfile >= 0.12"]
++deps = [
++ "distro == 1.4.0",
++ "mozfile >= 0.12",
++]
+
+ setup(
+ name="mozinfo",
+ version=PACKAGE_VERSION,
+ description="Library to get system information for use in Mozilla testing",
+ long_description="see https://firefox-source-docs.mozilla.org/mozbase/index.html",
+ classifiers=[
+ "Programming Language :: Python :: 2.7",
+diff --git a/testing/mozharness/tox.ini b/testing/mozharness/tox.ini
+--- a/testing/mozharness/tox.ini
++++ b/testing/mozharness/tox.ini
+@@ -1,14 +1,15 @@
+ [tox]
+ envlist = py27-hg4.3
+
+ [base]
+ deps =
+ coverage
++ distro
+ nose
+ rednose
+ {toxinidir}/../mozbase/mozlog
+ mozbase = {toxinidir}/../mozbase/
+
+
+ [testenv]
+ basepython = python2.7
+diff --git a/third_party/python/distro/CHANGELOG.md b/third_party/python/distro/CHANGELOG.md
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/CHANGELOG.md
+@@ -0,0 +1,147 @@
++## 1.4.0 (2019.2.4)
++
++BACKWARD COMPATIBILITY:
++* Prefer the VERSION_CODENAME field of os-release to parsing it from VERSION [[#230](https://github.com/nir0s/distro/pull/230)]
++
++BUG FIXES:
++* Return _uname_info from the uname_info() method [[#233](https://github.com/nir0s/distro/pull/233)]
++* Fixed CloudLinux id discovery [[#234](https://github.com/nir0s/distro/pull/234)]
++* Update Oracle matching [[#224](https://github.com/nir0s/distro/pull/224)]
++
++DOCS:
++* Update Fedora package link [[#225](https://github.com/nir0s/distro/pull/225)]
++* Distro is the recommended replacement for platform.linux_distribution [[#220](https://github.com/nir0s/distro/pull/220)]
++
++RELEASE:
++* Use Markdown for long description in setup.py [[#219](https://github.com/nir0s/distro/pull/219)]
++
++Additionally, The Python2.6 branch was fixed and rebased on top of master. It is now passing all tests. Thanks [abadger](https://github.com/abadger)!
++
++## 1.3.0 (2018.05.09)
++
++ENHANCEMENTS:
++* Added support for OpenBSD, FreeBSD, and NetBSD [[#207](https://github.com/nir0s/distro/issues/207)]
++
++TESTS:
++* Add test for Kali Linux Rolling [[#214](https://github.com/nir0s/distro/issues/214)]
++
++DOCS:
++* Update docs with regards to #207 [[#209](https://github.com/nir0s/distro/issues/209)]
++* Add Ansible reference implementation and fix arch-linux link [[#213](https://github.com/nir0s/distro/issues/213)]
++* Add facter reference implementation [[#213](https://github.com/nir0s/distro/issues/213)]
++
++## 1.2.0 (2017.12.24)
++
++BACKWARD COMPATIBILITY:
++* Don't raise ImportError on non-linux platforms [[#202](https://github.com/nir0s/distro/issues/202)]
++
++ENHANCEMENTS:
++* Lazily load the LinuxDistribution data [[#201](https://github.com/nir0s/distro/issues/201)]
++
++BUG FIXES:
++* Stdout of shell should be decoded with sys.getfilesystemencoding() [[#203](https://github.com/nir0s/distro/issues/203)]
++
++TESTS:
++* Explicitly set Python versions on Travis for flake [[#204](https://github.com/nir0s/distro/issues/204)]
++
++
++## 1.1.0 (2017.11.28)
++
++BACKWARD COMPATIBILITY:
++* Drop python3.3 support [[#199](https://github.com/nir0s/distro/issues/199)]
++* Remove Official Python26 support [[#195](https://github.com/nir0s/distro/issues/195)]
++
++TESTS:
++* Add MandrivaLinux test case [[#181](https://github.com/nir0s/distro/issues/181)]
++* Add test cases for CloudLinux 5, 6, and 7 [[#180](https://github.com/nir0s/distro/issues/180)]
++
++RELEASE:
++* Modify MANIFEST to include resources for tests and docs in source tarballs [[97c91a1](97c91a1)]
++
++## 1.0.4 (2017.04.01)
++
++BUG FIXES:
++* Guess common *-release files if /etc not readable [[#175](https://github.com/nir0s/distro/issues/175)]
++
++## 1.0.3 (2017.03.19)
++
++ENHANCEMENTS:
++* Show keys for empty values when running distro from the CLI [[#160](https://github.com/nir0s/distro/issues/160)]
++* Add manual mapping for `redhatenterpriseserver` (previously only redhatenterpriseworkstation was mapped) [[#148](https://github.com/nir0s/distro/issues/148)]
++* Race condition in `_parse_distro_release_file` [[#163](https://github.com/nir0s/distro/issues/163)]
++
++TESTS:
++* Add RHEL5 test case [[#165](https://github.com/nir0s/distro/issues/165)]
++* Add OpenELEC test case [[#166](https://github.com/nir0s/distro/issues/166)]
++* Replace nose with pytest [[#158](https://github.com/nir0s/distro/issues/158)]
++
++RELEASE:
++* Update classifiers
++* Update supported Python versions (with py36)
++
++## 1.0.2 (2017.01.12)
++
++TESTS:
++* Test on py33, py36 and py3 based flake8
++
++RELEASE:
++* Add MANIFEST file (which also includes the LICENSE as part of Issue [[#139](https://github.com/nir0s/distro/issues/139)])
++* Default to releasing using Twine [[#121](https://github.com/nir0s/distro/issues/121)]
++* Add setup.cfg file [[#145](https://github.com/nir0s/distro/issues/145)]
++* Update license in setup.py
++
++## 1.0.1 (2016-11-03)
++
++ENHANCEMENTS:
++* Prettify distro -j's output and add more elaborate docs [[#147](https://github.com/nir0s/distro/issues/147)]
++* Decode output of `lsb_release` as utf-8 [[#144](https://github.com/nir0s/distro/issues/144)]
++* Logger now uses `message %s, string` form to not-evaulate log messages if unnecessary [[#145](https://github.com/nir0s/distro/issues/145)]
++
++TESTS:
++* Increase code-coverage [[#146](https://github.com/nir0s/distro/issues/146)]
++* Fix landscape code-quality warnings [[#145](https://github.com/nir0s/distro/issues/145)]
++
++RELEASE:
++* Add CONTRIBUTING.md
++
++## 1.0.0 (2016-09-25)
++
++BACKWARD COMPATIBILITY:
++* raise exception when importing on non-supported platforms [[#129](https://github.com/nir0s/distro/issues/129)]
++
++ENHANCEMENTS:
++* Use `bytes` invariantly [[#135](https://github.com/nir0s/distro/issues/135)]
++* Some minor code adjustments plus a CLI [[#134](https://github.com/nir0s/distro/issues/134)]
++* Emit stderr if `lsb_release` fails
++
++BUG FIXES:
++* Fix some encoding related issues
++
++TESTS:
++* Add many test cases (e.g. Raspbian 8, CoreOS, Amazon Linux, Scientific Linux, Gentoo, Manjaro)
++* Completely redo the testing framework to make it easier to add tests
++* Test on pypy
++
++RELEASE:
++* Remove six as a dependency
++
++## 0.6.0 (2016-04-21)
++
++This is the first release of `distro`.
++All previous work was done on `ld` and therefore unmentioned here. See the release log in GitHub if you want the entire log.
++
++BACKWARD COMPATIBILITY:
++* No longer a package. constants.py has been removed and distro is now a single module
++
++ENHANCEMENTS:
++* distro.info() now receives best and pretty flags
++* Removed get_ prefix from get_*_release_attr functions
++* Codename is now passed in distro.info()
++
++TESTS:
++* Added Linux Mint test case
++* Now testing on Python 3.4
++
++DOCS:
++* Documentation fixes
++
+diff --git a/third_party/python/distro/CONTRIBUTING.md b/third_party/python/distro/CONTRIBUTING.md
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/CONTRIBUTING.md
+@@ -0,0 +1,54 @@
++# General
++
++* Contributing to distro identification currently doesn't have any specific standards and rather depends on the specific implementation.
++* A 100% coverage is expected for each PR unless explicitly authorized by the reviewer.
++* Please try to maintain maximum code-health (via landscape.io).
++
++# Contributing distro specific tests
++
++Distro's tests are implemented via a standardized framework under `tests/test_distro.py`
++
++For each distribution, tests should be added in the relevant class according to which distribution file(s) exists on it, so, for example, tests should be added under `TestOSRelease` where `/etc/os-release` is available.
++
++The tests must be self-contained, meaning that the release files for the distribution should be maintained in the repository under `tests/resources/distros/distribution_name+distribution_version`.
++
++A tests method would like somewhat like this:
++
++```python
++def test_centos7_os_release(self):
++ desired_outcome = {
++ 'id': 'centos',
++ 'name': 'CentOS Linux',
++ 'pretty_name': 'CentOS Linux 7 (Core)',
++ 'version': '7',
++ 'pretty_version': '7 (Core)',
++ 'best_version': '7',
++ 'like': 'rhel fedora',
++ 'codename': 'Core'
++ }
++ self._test_outcome(desired_outcome)
++```
++
++The framework will automatically try to pick up the relevant file according to the method's name (`centos7` meaning the folder should be named `centos7` as well) and compare the `desired_outcome` with the parsed files found under the test dir.
++
++The exception to the rule is under the `TestDistroRelease` test class which should look somewhat like this:
++
++```python
++def test_centos5_dist_release(self):
++ desired_outcome = {
++ 'id': 'centos',
++ 'name': 'CentOS',
++ 'pretty_name': 'CentOS 5.11 (Final)',
++ 'version': '5.11',
++ 'pretty_version': '5.11 (Final)',
++ 'best_version': '5.11',
++ 'codename': 'Final',
++ 'major_version': '5',
++ 'minor_version': '11'
++ }
++ self._test_outcome(desired_outcome, 'centos', '5')
++```
++
++Where the name of the method is not indicative of the lookup folder but rather tha two last arguments in `_test_outcome`.
++
++A test case is mandatory under `TestOverall` for a PR to be complete.
+\ No newline at end of file
+diff --git a/third_party/python/distro/CONTRIBUTORS.md b/third_party/python/distro/CONTRIBUTORS.md
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/CONTRIBUTORS.md
+@@ -0,0 +1,13 @@
++Thanks!
++
++* https://github.com/andy-maier
++* https://github.com/SethMichaelLarson
++* https://github.com/asottile
++* https://github.com/MartijnBraam
++* https://github.com/funkyfuture
++* https://github.com/adamjstewart
++* https://github.com/xavfernandez
++* https://github.com/xsuchy
++* https://github.com/marcoceppi
++* https://github.com/tgamblin
++* https://github.com/sebix
+diff --git a/third_party/python/distro/LICENSE b/third_party/python/distro/LICENSE
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/LICENSE
+@@ -0,0 +1,202 @@
++Apache License
++ Version 2.0, January 2004
++ http://www.apache.org/licenses/
++
++ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
++
++ 1. Definitions.
++
++ "License" shall mean the terms and conditions for use, reproduction,
++ and distribution as defined by Sections 1 through 9 of this document.
++
++ "Licensor" shall mean the copyright owner or entity authorized by
++ the copyright owner that is granting the License.
++
++ "Legal Entity" shall mean the union of the acting entity and all
++ other entities that control, are controlled by, or are under common
++ control with that entity. For the purposes of this definition,
++ "control" means (i) the power, direct or indirect, to cause the
++ direction or management of such entity, whether by contract or
++ otherwise, or (ii) ownership of fifty percent (50%) or more of the
++ outstanding shares, or (iii) beneficial ownership of such entity.
++
++ "You" (or "Your") shall mean an individual or Legal Entity
++ exercising permissions granted by this License.
++
++ "Source" form shall mean the preferred form for making modifications,
++ including but not limited to software source code, documentation
++ source, and configuration files.
++
++ "Object" form shall mean any form resulting from mechanical
++ transformation or translation of a Source form, including but
++ not limited to compiled object code, generated documentation,
++ and conversions to other media types.
++
++ "Work" shall mean the work of authorship, whether in Source or
++ Object form, made available under the License, as indicated by a
++ copyright notice that is included in or attached to the work
++ (an example is provided in the Appendix below).
++
++ "Derivative Works" shall mean any work, whether in Source or Object
++ form, that is based on (or derived from) the Work and for which the
++ editorial revisions, annotations, elaborations, or other modifications
++ represent, as a whole, an original work of authorship. For the purposes
++ of this License, Derivative Works shall not include works that remain
++ separable from, or merely link (or bind by name) to the interfaces of,
++ the Work and Derivative Works thereof.
++
++ "Contribution" shall mean any work of authorship, including
++ the original version of the Work and any modifications or additions
++ to that Work or Derivative Works thereof, that is intentionally
++ submitted to Licensor for inclusion in the Work by the copyright owner
++ or by an individual or Legal Entity authorized to submit on behalf of
++ the copyright owner. For the purposes of this definition, "submitted"
++ means any form of electronic, verbal, or written communication sent
++ to the Licensor or its representatives, including but not limited to
++ communication on electronic mailing lists, source code control systems,
++ and issue tracking systems that are managed by, or on behalf of, the
++ Licensor for the purpose of discussing and improving the Work, but
++ excluding communication that is conspicuously marked or otherwise
++ designated in writing by the copyright owner as "Not a Contribution."
++
++ "Contributor" shall mean Licensor and any individual or Legal Entity
++ on behalf of whom a Contribution has been received by Licensor and
++ subsequently incorporated within the Work.
++
++ 2. Grant of Copyright License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ copyright license to reproduce, prepare Derivative Works of,
++ publicly display, publicly perform, sublicense, and distribute the
++ Work and such Derivative Works in Source or Object form.
++
++ 3. Grant of Patent License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ (except as stated in this section) patent license to make, have made,
++ use, offer to sell, sell, import, and otherwise transfer the Work,
++ where such license applies only to those patent claims licensable
++ by such Contributor that are necessarily infringed by their
++ Contribution(s) alone or by combination of their Contribution(s)
++ with the Work to which such Contribution(s) was submitted. If You
++ institute patent litigation against any entity (including a
++ cross-claim or counterclaim in a lawsuit) alleging that the Work
++ or a Contribution incorporated within the Work constitutes direct
++ or contributory patent infringement, then any patent licenses
++ granted to You under this License for that Work shall terminate
++ as of the date such litigation is filed.
++
++ 4. Redistribution. You may reproduce and distribute copies of the
++ Work or Derivative Works thereof in any medium, with or without
++ modifications, and in Source or Object form, provided that You
++ meet the following conditions:
++
++ (a) You must give any other recipients of the Work or
++ Derivative Works a copy of this License; and
++
++ (b) You must cause any modified files to carry prominent notices
++ stating that You changed the files; and
++
++ (c) You must retain, in the Source form of any Derivative Works
++ that You distribute, all copyright, patent, trademark, and
++ attribution notices from the Source form of the Work,
++ excluding those notices that do not pertain to any part of
++ the Derivative Works; and
++
++ (d) If the Work includes a "NOTICE" text file as part of its
++ distribution, then any Derivative Works that You distribute must
++ include a readable copy of the attribution notices contained
++ within such NOTICE file, excluding those notices that do not
++ pertain to any part of the Derivative Works, in at least one
++ of the following places: within a NOTICE text file distributed
++ as part of the Derivative Works; within the Source form or
++ documentation, if provided along with the Derivative Works; or,
++ within a display generated by the Derivative Works, if and
++ wherever such third-party notices normally appear. The contents
++ of the NOTICE file are for informational purposes only and
++ do not modify the License. You may add Your own attribution
++ notices within Derivative Works that You distribute, alongside
++ or as an addendum to the NOTICE text from the Work, provided
++ that such additional attribution notices cannot be construed
++ as modifying the License.
++
++ You may add Your own copyright statement to Your modifications and
++ may provide additional or different license terms and conditions
++ for use, reproduction, or distribution of Your modifications, or
++ for any such Derivative Works as a whole, provided Your use,
++ reproduction, and distribution of the Work otherwise complies with
++ the conditions stated in this License.
++
++ 5. Submission of Contributions. Unless You explicitly state otherwise,
++ any Contribution intentionally submitted for inclusion in the Work
++ by You to the Licensor shall be under the terms and conditions of
++ this License, without any additional terms or conditions.
++ Notwithstanding the above, nothing herein shall supersede or modify
++ the terms of any separate license agreement you may have executed
++ with Licensor regarding such Contributions.
++
++ 6. Trademarks. This License does not grant permission to use the trade
++ names, trademarks, service marks, or product names of the Licensor,
++ except as required for reasonable and customary use in describing the
++ origin of the Work and reproducing the content of the NOTICE file.
++
++ 7. Disclaimer of Warranty. Unless required by applicable law or
++ agreed to in writing, Licensor provides the Work (and each
++ Contributor provides its Contributions) on an "AS IS" BASIS,
++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
++ implied, including, without limitation, any warranties or conditions
++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
++ PARTICULAR PURPOSE. You are solely responsible for determining the
++ appropriateness of using or redistributing the Work and assume any
++ risks associated with Your exercise of permissions under this License.
++
++ 8. Limitation of Liability. In no event and under no legal theory,
++ whether in tort (including negligence), contract, or otherwise,
++ unless required by applicable law (such as deliberate and grossly
++ negligent acts) or agreed to in writing, shall any Contributor be
++ liable to You for damages, including any direct, indirect, special,
++ incidental, or consequential damages of any character arising as a
++ result of this License or out of the use or inability to use the
++ Work (including but not limited to damages for loss of goodwill,
++ work stoppage, computer failure or malfunction, or any and all
++ other commercial damages or losses), even if such Contributor
++ has been advised of the possibility of such damages.
++
++ 9. Accepting Warranty or Additional Liability. While redistributing
++ the Work or Derivative Works thereof, You may choose to offer,
++ and charge a fee for, acceptance of support, warranty, indemnity,
++ or other liability obligations and/or rights consistent with this
++ License. However, in accepting such obligations, You may act only
++ on Your own behalf and on Your sole responsibility, not on behalf
++ of any other Contributor, and only if You agree to indemnify,
++ defend, and hold each Contributor harmless for any liability
++ incurred by, or claims asserted against, such Contributor by reason
++ of your accepting any such warranty or additional liability.
++
++ END OF TERMS AND CONDITIONS
++
++ APPENDIX: How to apply the Apache License to your work.
++
++ To apply the Apache License to your work, attach the following
++ boilerplate notice, with the fields enclosed by brackets "{}"
++ replaced with your own identifying information. (Don't include
++ the brackets!) The text should be enclosed in the appropriate
++ comment syntax for the file format. We also recommend that a
++ file or class name and description of purpose be included on the
++ same "printed page" as the copyright notice for easier
++ identification within third-party archives.
++
++ Copyright {yyyy} {name of copyright owner}
++
++ Licensed under the Apache License, Version 2.0 (the "License");
++ you may not use this file except in compliance with the License.
++ You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++ Unless required by applicable law or agreed to in writing, software
++ distributed under the License is distributed on an "AS IS" BASIS,
++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++ See the License for the specific language governing permissions and
++ limitations under the License.
++
+diff --git a/third_party/python/distro/MANIFEST.in b/third_party/python/distro/MANIFEST.in
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/MANIFEST.in
+@@ -0,0 +1,12 @@
++include *.md
++include *.py
++include *.txt
++include LICENSE
++include CHANGES
++include Makefile
++
++graft tests
++
++include docs/*
++
++global-exclude *.py[co]
+diff --git a/third_party/python/distro/Makefile b/third_party/python/distro/Makefile
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/Makefile
+@@ -0,0 +1,145 @@
++# Copyright 2015,2016 Nir Cohen
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++
++# Name of this package
++PACKAGENAME = distro
++
++# Additional options for Sphinx
++SPHINXOPTS = -v
++
++# Paper format for the Sphinx LaTex/PDF builder.
++# Valid values: a4, letter
++SPHINXPAPER = a4
++
++# Sphinx build subtree.
++SPHINXBUILDDIR = build_docs
++
++# Directory where conf.py is located
++SPHINXCONFDIR = docs
++
++# Directory where input files for Sphinx are located
++SPHINXSOURCEDIR = .
++
++# Sphinx build command (Use 'pip install sphinx' to get it)
++SPHINXBUILD = sphinx-build
++
++# Internal variables for Sphinx
++SPHINXPAPEROPT_a4 = -D latex_paper_size=a4
++SPHINXPAPEROPT_letter = -D latex_paper_size=letter
++ALLSPHINXOPTS = -d $(SPHINXBUILDDIR)/doctrees -c $(SPHINXCONFDIR) \
++ $(SPHINXPAPEROPT_$(SPHINXPAPER)) $(SPHINXOPTS) \
++ $(SPHINXSOURCEDIR)
++
++.PHONY: help
++help:
++ @echo 'Please use "make <target>" where <target> is one of'
++ @echo " release - build a release and publish it"
++ @echo " dev - prepare a development environment (includes tests)"
++ @echo " instdev - prepare a development environment (no tests)"
++ @echo " install - install into current Python environment"
++ @echo " html - generate docs as standalone HTML files in: $(SPHINXBUILDDIR)/html"
++ @echo " pdf - generate docs as PDF (via LaTeX) for paper format: $(SPHINXPAPER) in: $(SPHINXBUILDDIR)/pdf"
++ @echo " man - generate docs as manual pages in: $(SPHINXBUILDDIR)/man"
++ @echo " docchanges - generate an overview of all changed/added/deprecated items in docs"
++ @echo " doclinkcheck - check all external links in docs for integrity"
++ @echo " doccoverage - run coverage check of the documentation"
++ @echo " clobber - remove any build products"
++ @echo " build - build the package"
++ @echo " test - test from this directory using tox, including test coverage"
++ @echo " publish - upload to PyPI"
++ @echo " clean - remove any temporary build products"
++ @echo " dry-run - perform all action required for a release without actually releasing"
++
++.PHONY: release
++release: test clean build publish
++ @echo "$@ done."
++
++.PHONY: test
++test:
++ pip install 'tox>=1.7.2'
++ tox
++ @echo "$@ done."
++
++.PHONY: clean
++clean:
++ rm -rf dist build $(PACKAGENAME).egg-info
++ @echo "$@ done."
++
++.PHONY: build
++build:
++ python setup.py sdist bdist_wheel
++
++.PHONY: publish
++publish:
++ twine upload -r pypi dist/$(PACKAGENAME)-*
++ @echo "$@ done."
++
++.PHONY: dry-run
++dry-run: test clean build
++ @echo "$@ done."
++
++.PHONY: dev
++dev: instdev test
++ @echo "$@ done."
++
++.PHONY: instdev
++instdev:
++ pip install -r dev-requirements.txt
++ python setup.py develop
++ @echo "$@ done."
++
++.PHONY: install
++install:
++ python setup.py install
++ @echo "$@ done."
++
++.PHONY: html
++html:
++ $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(SPHINXBUILDDIR)/html
++ @echo "$@ done; the HTML pages are in $(SPHINXBUILDDIR)/html."
++
++.PHONY: pdf
++pdf:
++ $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(SPHINXBUILDDIR)/pdf
++ @echo "Running LaTeX files through pdflatex..."
++ $(MAKE) -C $(SPHINXBUILDDIR)/pdf all-pdf
++ @echo "$@ done; the PDF files are in $(SPHINXBUILDDIR)/pdf."
++
++.PHONY: man
++man:
++ $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(SPHINXBUILDDIR)/man
++ @echo "$@ done; the manual pages are in $(SPHINXBUILDDIR)/man."
++
++.PHONY: docchanges
++docchanges:
++ $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(SPHINXBUILDDIR)/changes
++ @echo
++ @echo "$@ done; the doc changes overview file is in $(SPHINXBUILDDIR)/changes."
++
++.PHONY: doclinkcheck
++doclinkcheck:
++ $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(SPHINXBUILDDIR)/linkcheck
++ @echo
++ @echo "$@ done; look for any errors in the above output " \
++ "or in $(SPHINXBUILDDIR)/linkcheck/output.txt."
++
++.PHONY: doccoverage
++doccoverage:
++ $(SPHINXBUILD) -b coverage $(ALLSPHINXOPTS) $(SPHINXBUILDDIR)/coverage
++ @echo "$@ done; the doc coverage results are in $(SPHINXBUILDDIR)/coverage/python.txt."
++
++.PHONY: clobber
++clobber: clean
++ rm -rf $(SPHINXBUILDDIR)
++ @echo "$@ done."
+diff --git a/third_party/python/distro/PKG-INFO b/third_party/python/distro/PKG-INFO
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/PKG-INFO
+@@ -0,0 +1,168 @@
++Metadata-Version: 2.1
++Name: distro
++Version: 1.4.0
++Summary: Distro - an OS platform information API
++Home-page: https://github.com/nir0s/distro
++Author: Nir Cohen
++Author-email: nir36g@gmail.com
++License: Apache License, Version 2.0
++Description: Distro - an OS platform information API
++ =======================================
++
++ [![Build Status](https://travis-ci.org/nir0s/distro.svg?branch=master)](https://travis-ci.org/nir0s/distro)
++ [![Build status](https://ci.appveyor.com/api/projects/status/e812qjk1gf0f74r5/branch/master?svg=true)](https://ci.appveyor.com/project/nir0s/distro/branch/master)
++ [![PyPI version](http://img.shields.io/pypi/v/distro.svg)](https://pypi.python.org/pypi/distro)
++ [![Supported Python Versions](https://img.shields.io/pypi/pyversions/distro.svg)](https://img.shields.io/pypi/pyversions/distro.svg)
++ [![Requirements Status](https://requires.io/github/nir0s/distro/requirements.svg?branch=master)](https://requires.io/github/nir0s/distro/requirements/?branch=master)
++ [![Code Coverage](https://codecov.io/github/nir0s/distro/coverage.svg?branch=master)](https://codecov.io/github/nir0s/distro?branch=master)
++ [![Code Quality](https://landscape.io/github/nir0s/distro/master/landscape.svg?style=flat)](https://landscape.io/github/nir0s/distro)
++ [![Is Wheel](https://img.shields.io/pypi/wheel/distro.svg?style=flat)](https://pypi.python.org/pypi/distro)
++ [![Latest Github Release](https://readthedocs.org/projects/distro/badge/?version=stable)](http://distro.readthedocs.io/en/latest/)
++ [![Join the chat at https://gitter.im/nir0s/distro](https://badges.gitter.im/nir0s/distro.svg)](https://gitter.im/nir0s/distro?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
++
++ `distro` provides information about the
++ OS distribution it runs on, such as a reliable machine-readable ID, or
++ version information.
++
++ It is the recommended replacement for Python's original
++ [`platform.linux_distribution`](https://docs.python.org/3.7/library/platform.html#platform.linux_distribution)
++ function (which will be removed in Python 3.8).
++ It also provides much more functionality which isn't necessarily Python bound,
++ like a command-line interface.
++
++ Distro currently supports Linux and BSD based systems but [Windows and OS X support](https://github.com/nir0s/distro/issues/177) is also planned.
++
++ For Python 2.6 support, see https://github.com/nir0s/distro/tree/python2.6-support
++
++ ## Installation
++
++ Installation of the latest released version from PyPI:
++
++ ```shell
++ pip install distro
++ ```
++
++ Installation of the latest development version:
++
++ ```shell
++ pip install https://github.com/nir0s/distro/archive/master.tar.gz
++ ```
++
++
++ ## Usage
++
++ ```bash
++ $ distro
++ Name: Antergos Linux
++ Version: 2015.10 (ISO-Rolling)
++ Codename: ISO-Rolling
++
++ $ distro -j
++ {
++ "codename": "ISO-Rolling",
++ "id": "antergos",
++ "like": "arch",
++ "version": "16.9",
++ "version_parts": {
++ "build_number": "",
++ "major": "16",
++ "minor": "9"
++ }
++ }
++
++
++ $ python
++ >>> import distro
++ >>> distro.linux_distribution(full_distribution_name=False)
++ ('centos', '7.1.1503', 'Core')
++ ```
++
++
++ ## Documentation
++
++ On top of the aforementioned API, several more functions are available. For a complete description of the
++ API, see the [latest API documentation](http://distro.readthedocs.org/en/latest/).
++
++ ## Background
++
++ An alternative implementation became necessary because Python 3.5 deprecated
++ this function, and Python 3.8 will remove it altogether.
++ Its predecessor function `platform.dist` was already deprecated since
++ Python 2.6 and will also be removed in Python 3.8.
++ Still, there are many cases in which access to that information is needed.
++ See [Python issue 1322](https://bugs.python.org/issue1322) for more
++ information.
++
++ The `distro` package implements a robust and inclusive way of retrieving the
++ information about a distribution based on new standards and old methods,
++ namely from these data sources (from high to low precedence):
++
++ * The os-release file `/etc/os-release`, if present.
++ * The output of the `lsb_release` command, if available.
++ * The distro release file (`/etc/*(-|_)(release|version)`), if present.
++        * The `uname` command for BSD based distributions.
++
++
++ ## Python and Distribution Support
++
++ `distro` is supported and tested on Python 2.7, 3.4+ and PyPy and on
++ any distribution that provides one or more of the data sources
++ covered.
++
++ This package is tested with test data that mimics the exact behavior of the data sources of [a number of Linux distributions](https://github.com/nir0s/distro/tree/master/tests/resources/distros).
++
++
++ ## Testing
++
++ ```shell
++ git clone git@github.com:nir0s/distro.git
++ cd distro
++ pip install tox
++ tox
++ ```
++
++
++ ## Contributions
++
++ Pull requests are always welcome to deal with specific distributions or just
++ for general merriment.
++
++ See [CONTRIBUTIONS](https://github.com/nir0s/distro/blob/master/CONTRIBUTING.md) for contribution info.
++
++ Reference implementations for supporting additional distributions and file
++ formats can be found here:
++
++ * https://github.com/saltstack/salt/blob/develop/salt/grains/core.py#L1172
++ * https://github.com/chef/ohai/blob/master/lib/ohai/plugins/linux/platform.rb
++ * https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/facts/system/distribution.py
++ * https://github.com/puppetlabs/facter/blob/master/lib/src/facts/linux/os_linux.cc
++
++ ## Package manager distributions
++
++ * https://src.fedoraproject.org/rpms/python-distro
++ * https://www.archlinux.org/packages/community/any/python-distro/
++ * https://launchpad.net/ubuntu/+source/python-distro
++ * https://packages.debian.org/sid/python-distro
++ * https://packages.gentoo.org/packages/dev-python/distro
++ * https://pkgs.org/download/python2-distro
++ * https://slackbuilds.org/repository/14.2/python/python-distro/
++
++Platform: All
++Classifier: Development Status :: 5 - Production/Stable
++Classifier: Intended Audience :: Developers
++Classifier: Intended Audience :: System Administrators
++Classifier: License :: OSI Approved :: Apache Software License
++Classifier: Operating System :: POSIX :: Linux
++Classifier: Operating System :: POSIX :: BSD
++Classifier: Operating System :: POSIX :: BSD :: FreeBSD
++Classifier: Operating System :: POSIX :: BSD :: NetBSD
++Classifier: Operating System :: POSIX :: BSD :: OpenBSD
++Classifier: Programming Language :: Python :: 2
++Classifier: Programming Language :: Python :: 2.7
++Classifier: Programming Language :: Python :: 3
++Classifier: Programming Language :: Python :: 3.4
++Classifier: Programming Language :: Python :: 3.5
++Classifier: Programming Language :: Python :: 3.6
++Classifier: Topic :: Software Development :: Libraries :: Python Modules
++Classifier: Topic :: System :: Operating System
++Description-Content-Type: text/markdown
+diff --git a/third_party/python/distro/README.md b/third_party/python/distro/README.md
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/README.md
+@@ -0,0 +1,140 @@
++Distro - an OS platform information API
++=======================================
++
++[![Build Status](https://travis-ci.org/nir0s/distro.svg?branch=master)](https://travis-ci.org/nir0s/distro)
++[![Build status](https://ci.appveyor.com/api/projects/status/e812qjk1gf0f74r5/branch/master?svg=true)](https://ci.appveyor.com/project/nir0s/distro/branch/master)
++[![PyPI version](http://img.shields.io/pypi/v/distro.svg)](https://pypi.python.org/pypi/distro)
++[![Supported Python Versions](https://img.shields.io/pypi/pyversions/distro.svg)](https://img.shields.io/pypi/pyversions/distro.svg)
++[![Requirements Status](https://requires.io/github/nir0s/distro/requirements.svg?branch=master)](https://requires.io/github/nir0s/distro/requirements/?branch=master)
++[![Code Coverage](https://codecov.io/github/nir0s/distro/coverage.svg?branch=master)](https://codecov.io/github/nir0s/distro?branch=master)
++[![Code Quality](https://landscape.io/github/nir0s/distro/master/landscape.svg?style=flat)](https://landscape.io/github/nir0s/distro)
++[![Is Wheel](https://img.shields.io/pypi/wheel/distro.svg?style=flat)](https://pypi.python.org/pypi/distro)
++[![Latest Github Release](https://readthedocs.org/projects/distro/badge/?version=stable)](http://distro.readthedocs.io/en/latest/)
++[![Join the chat at https://gitter.im/nir0s/distro](https://badges.gitter.im/nir0s/distro.svg)](https://gitter.im/nir0s/distro?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
++
++`distro` provides information about the
++OS distribution it runs on, such as a reliable machine-readable ID, or
++version information.
++
++It is the recommended replacement for Python's original
++[`platform.linux_distribution`](https://docs.python.org/3.7/library/platform.html#platform.linux_distribution)
++function (which will be removed in Python 3.8).
++It also provides much more functionality which isn't necessarily Python bound,
++like a command-line interface.
++
++Distro currently supports Linux and BSD based systems but [Windows and OS X support](https://github.com/nir0s/distro/issues/177) is also planned.
++
++For Python 2.6 support, see https://github.com/nir0s/distro/tree/python2.6-support
++
++## Installation
++
++Installation of the latest released version from PyPI:
++
++```shell
++pip install distro
++```
++
++Installation of the latest development version:
++
++```shell
++pip install https://github.com/nir0s/distro/archive/master.tar.gz
++```
++
++
++## Usage
++
++```bash
++$ distro
++Name: Antergos Linux
++Version: 2015.10 (ISO-Rolling)
++Codename: ISO-Rolling
++
++$ distro -j
++{
++ "codename": "ISO-Rolling",
++ "id": "antergos",
++ "like": "arch",
++ "version": "16.9",
++ "version_parts": {
++ "build_number": "",
++ "major": "16",
++ "minor": "9"
++ }
++}
++
++
++$ python
++>>> import distro
++>>> distro.linux_distribution(full_distribution_name=False)
++('centos', '7.1.1503', 'Core')
++```
++
++
++## Documentation
++
++On top of the aforementioned API, several more functions are available. For a complete description of the
++API, see the [latest API documentation](http://distro.readthedocs.org/en/latest/).
++
++## Background
++
++An alternative implementation became necessary because Python 3.5 deprecated
++this function, and Python 3.8 will remove it altogether.
++Its predecessor function `platform.dist` was already deprecated since
++Python 2.6 and will also be removed in Python 3.8.
++Still, there are many cases in which access to that information is needed.
++See [Python issue 1322](https://bugs.python.org/issue1322) for more
++information.
++
++The `distro` package implements a robust and inclusive way of retrieving the
++information about a distribution based on new standards and old methods,
++namely from these data sources (from high to low precedence):
++
++* The os-release file `/etc/os-release`, if present.
++* The output of the `lsb_release` command, if available.
++* The distro release file (`/etc/*(-|_)(release|version)`), if present.
++* The `uname` command for BSD based distributions.
++
++
++## Python and Distribution Support
++
++`distro` is supported and tested on Python 2.7, 3.4+ and PyPy and on
++any distribution that provides one or more of the data sources
++covered.
++
++This package is tested with test data that mimics the exact behavior of the data sources of [a number of Linux distributions](https://github.com/nir0s/distro/tree/master/tests/resources/distros).
++
++
++## Testing
++
++```shell
++git clone git@github.com:nir0s/distro.git
++cd distro
++pip install tox
++tox
++```
++
++
++## Contributions
++
++Pull requests are always welcome to deal with specific distributions or just
++for general merriment.
++
++See [CONTRIBUTIONS](https://github.com/nir0s/distro/blob/master/CONTRIBUTING.md) for contribution info.
++
++Reference implementations for supporting additional distributions and file
++formats can be found here:
++
++* https://github.com/saltstack/salt/blob/develop/salt/grains/core.py#L1172
++* https://github.com/chef/ohai/blob/master/lib/ohai/plugins/linux/platform.rb
++* https://github.com/ansible/ansible/blob/devel/lib/ansible/module_utils/facts/system/distribution.py
++* https://github.com/puppetlabs/facter/blob/master/lib/src/facts/linux/os_linux.cc
++
++## Package manager distributions
++
++* https://src.fedoraproject.org/rpms/python-distro
++* https://www.archlinux.org/packages/community/any/python-distro/
++* https://launchpad.net/ubuntu/+source/python-distro
++* https://packages.debian.org/sid/python-distro
++* https://packages.gentoo.org/packages/dev-python/distro
++* https://pkgs.org/download/python2-distro
++* https://slackbuilds.org/repository/14.2/python/python-distro/
+diff --git a/third_party/python/distro/dev-requirements.txt b/third_party/python/distro/dev-requirements.txt
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/dev-requirements.txt
+@@ -0,0 +1,3 @@
++pytest
++pytest-cov
++sphinx>=1.1
+\ No newline at end of file
+diff --git a/third_party/python/distro/distro.py b/third_party/python/distro/distro.py
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/distro.py
+@@ -0,0 +1,1216 @@
++# Copyright 2015,2016,2017 Nir Cohen
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++
++"""
++The ``distro`` package (``distro`` stands for Linux Distribution) provides
++information about the Linux distribution it runs on, such as a reliable
++machine-readable distro ID, or version information.
++
++It is the recommended replacement for Python's original
++:py:func:`platform.linux_distribution` function, but it provides much more
++functionality. An alternative implementation became necessary because Python
++3.5 deprecated this function, and Python 3.8 will remove it altogether.
++Its predecessor function :py:func:`platform.dist` was already
++deprecated since Python 2.6 and will also be removed in Python 3.8.
++Still, there are many cases in which access to OS distribution information
++is needed. See `Python issue 1322 <https://bugs.python.org/issue1322>`_ for
++more information.
++"""
++
++import os
++import re
++import sys
++import json
++import shlex
++import logging
++import argparse
++import subprocess
++
++
++_UNIXCONFDIR = os.environ.get('UNIXCONFDIR', '/etc')
++_OS_RELEASE_BASENAME = 'os-release'
++
++#: Translation table for normalizing the "ID" attribute defined in os-release
++#: files, for use by the :func:`distro.id` method.
++#:
++#: * Key: Value as defined in the os-release file, translated to lower case,
++#: with blanks translated to underscores.
++#:
++#: * Value: Normalized value.
++NORMALIZED_OS_ID = {
++ 'ol': 'oracle', # Oracle Enterprise Linux
++}
++
++#: Translation table for normalizing the "Distributor ID" attribute returned by
++#: the lsb_release command, for use by the :func:`distro.id` method.
++#:
++#: * Key: Value as returned by the lsb_release command, translated to lower
++#: case, with blanks translated to underscores.
++#:
++#: * Value: Normalized value.
++NORMALIZED_LSB_ID = {
++ 'enterpriseenterprise': 'oracle', # Oracle Enterprise Linux
++ 'redhatenterpriseworkstation': 'rhel', # RHEL 6, 7 Workstation
++ 'redhatenterpriseserver': 'rhel', # RHEL 6, 7 Server
++}
++
++#: Translation table for normalizing the distro ID derived from the file name
++#: of distro release files, for use by the :func:`distro.id` method.
++#:
++#: * Key: Value as derived from the file name of a distro release file,
++#: translated to lower case, with blanks translated to underscores.
++#:
++#: * Value: Normalized value.
++NORMALIZED_DISTRO_ID = {
++ 'redhat': 'rhel', # RHEL 6.x, 7.x
++}
++
++# Pattern for content of distro release file (reversed)
++_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN = re.compile(
++ r'(?:[^)]*\)(.*)\()? *(?:STL )?([\d.+\-a-z]*\d) *(?:esaeler *)?(.+)')
++
++# Pattern for base file name of distro release file
++_DISTRO_RELEASE_BASENAME_PATTERN = re.compile(
++ r'(\w+)[-_](release|version)$')
++
++# Base file names to be ignored when searching for distro release file
++_DISTRO_RELEASE_IGNORE_BASENAMES = (
++ 'debian_version',
++ 'lsb-release',
++ 'oem-release',
++ _OS_RELEASE_BASENAME,
++ 'system-release'
++)
++
++
++def linux_distribution(full_distribution_name=True):
++ """
++ Return information about the current OS distribution as a tuple
++ ``(id_name, version, codename)`` with items as follows:
++
++ * ``id_name``: If *full_distribution_name* is false, the result of
++ :func:`distro.id`. Otherwise, the result of :func:`distro.name`.
++
++ * ``version``: The result of :func:`distro.version`.
++
++ * ``codename``: The result of :func:`distro.codename`.
++
++ The interface of this function is compatible with the original
++ :py:func:`platform.linux_distribution` function, supporting a subset of
++ its parameters.
++
++ The data it returns may not exactly be the same, because it uses more data
++ sources than the original function, and that may lead to different data if
++ the OS distribution is not consistent across multiple data sources it
++ provides (there are indeed such distributions ...).
++
++ Another reason for differences is the fact that the :func:`distro.id`
++ method normalizes the distro ID string to a reliable machine-readable value
++ for a number of popular OS distributions.
++ """
++ return _distro.linux_distribution(full_distribution_name)
++
++
++def id():
++ """
++ Return the distro ID of the current distribution, as a
++ machine-readable string.
++
++ For a number of OS distributions, the returned distro ID value is
++ *reliable*, in the sense that it is documented and that it does not change
++ across releases of the distribution.
++
++ This package maintains the following reliable distro ID values:
++
++ ============== =========================================
++ Distro ID Distribution
++ ============== =========================================
++ "ubuntu" Ubuntu
++ "debian" Debian
++ "rhel" RedHat Enterprise Linux
++ "centos" CentOS
++ "fedora" Fedora
++ "sles" SUSE Linux Enterprise Server
++ "opensuse" openSUSE
++ "amazon" Amazon Linux
++ "arch" Arch Linux
++ "cloudlinux" CloudLinux OS
++ "exherbo" Exherbo Linux
++    "gentoo"       Gentoo Linux
++ "ibm_powerkvm" IBM PowerKVM
++ "kvmibm" KVM for IBM z Systems
++ "linuxmint" Linux Mint
++ "mageia" Mageia
++ "mandriva" Mandriva Linux
++ "parallels" Parallels
++ "pidora" Pidora
++ "raspbian" Raspbian
++ "oracle" Oracle Linux (and Oracle Enterprise Linux)
++ "scientific" Scientific Linux
++ "slackware" Slackware
++ "xenserver" XenServer
++ "openbsd" OpenBSD
++ "netbsd" NetBSD
++ "freebsd" FreeBSD
++ ============== =========================================
++
++ If you have a need to get distros for reliable IDs added into this set,
++ or if you find that the :func:`distro.id` function returns a different
++ distro ID for one of the listed distros, please create an issue in the
++ `distro issue tracker`_.
++
++ **Lookup hierarchy and transformations:**
++
++ First, the ID is obtained from the following sources, in the specified
++ order. The first available and non-empty value is used:
++
++ * the value of the "ID" attribute of the os-release file,
++
++ * the value of the "Distributor ID" attribute returned by the lsb_release
++ command,
++
++ * the first part of the file name of the distro release file,
++
++ The so determined ID value then passes the following transformations,
++ before it is returned by this method:
++
++ * it is translated to lower case,
++
++ * blanks (which should not be there anyway) are translated to underscores,
++
++ * a normalization of the ID is performed, based upon
++ `normalization tables`_. The purpose of this normalization is to ensure
++ that the ID is as reliable as possible, even across incompatible changes
++ in the OS distributions. A common reason for an incompatible change is
++ the addition of an os-release file, or the addition of the lsb_release
++ command, with ID values that differ from what was previously determined
++ from the distro release file name.
++ """
++ return _distro.id()
++
++
++def name(pretty=False):
++ """
++ Return the name of the current OS distribution, as a human-readable
++ string.
++
++ If *pretty* is false, the name is returned without version or codename.
++ (e.g. "CentOS Linux")
++
++ If *pretty* is true, the version and codename are appended.
++ (e.g. "CentOS Linux 7.1.1503 (Core)")
++
++ **Lookup hierarchy:**
++
++ The name is obtained from the following sources, in the specified order.
++ The first available and non-empty value is used:
++
++ * If *pretty* is false:
++
++ - the value of the "NAME" attribute of the os-release file,
++
++ - the value of the "Distributor ID" attribute returned by the lsb_release
++ command,
++
++ - the value of the "<name>" field of the distro release file.
++
++ * If *pretty* is true:
++
++ - the value of the "PRETTY_NAME" attribute of the os-release file,
++
++ - the value of the "Description" attribute returned by the lsb_release
++ command,
++
++ - the value of the "<name>" field of the distro release file, appended
++ with the value of the pretty version ("<version_id>" and "<codename>"
++ fields) of the distro release file, if available.
++ """
++ return _distro.name(pretty)
++
++
++def version(pretty=False, best=False):
++ """
++ Return the version of the current OS distribution, as a human-readable
++ string.
++
++ If *pretty* is false, the version is returned without codename (e.g.
++ "7.0").
++
++ If *pretty* is true, the codename in parenthesis is appended, if the
++ codename is non-empty (e.g. "7.0 (Maipo)").
++
++ Some distributions provide version numbers with different precisions in
++ the different sources of distribution information. Examining the different
++ sources in a fixed priority order does not always yield the most precise
++ version (e.g. for Debian 8.2, or CentOS 7.1).
++
++ The *best* parameter can be used to control the approach for the returned
++ version:
++
++ If *best* is false, the first non-empty version number in priority order of
++ the examined sources is returned.
++
++ If *best* is true, the most precise version number out of all examined
++ sources is returned.
++
++ **Lookup hierarchy:**
++
++ In all cases, the version number is obtained from the following sources.
++ If *best* is false, this order represents the priority order:
++
++ * the value of the "VERSION_ID" attribute of the os-release file,
++ * the value of the "Release" attribute returned by the lsb_release
++ command,
++ * the version number parsed from the "<version_id>" field of the first line
++ of the distro release file,
++ * the version number parsed from the "PRETTY_NAME" attribute of the
++ os-release file, if it follows the format of the distro release files.
++ * the version number parsed from the "Description" attribute returned by
++ the lsb_release command, if it follows the format of the distro release
++ files.
++ """
++ return _distro.version(pretty, best)
++
++
++def version_parts(best=False):
++ """
++ Return the version of the current OS distribution as a tuple
++ ``(major, minor, build_number)`` with items as follows:
++
++ * ``major``: The result of :func:`distro.major_version`.
++
++ * ``minor``: The result of :func:`distro.minor_version`.
++
++ * ``build_number``: The result of :func:`distro.build_number`.
++
++ For a description of the *best* parameter, see the :func:`distro.version`
++ method.
++ """
++ return _distro.version_parts(best)
++
++
++def major_version(best=False):
++ """
++ Return the major version of the current OS distribution, as a string,
++ if provided.
++ Otherwise, the empty string is returned. The major version is the first
++ part of the dot-separated version string.
++
++ For a description of the *best* parameter, see the :func:`distro.version`
++ method.
++ """
++ return _distro.major_version(best)
++
++
++def minor_version(best=False):
++ """
++ Return the minor version of the current OS distribution, as a string,
++ if provided.
++ Otherwise, the empty string is returned. The minor version is the second
++ part of the dot-separated version string.
++
++ For a description of the *best* parameter, see the :func:`distro.version`
++ method.
++ """
++ return _distro.minor_version(best)
++
++
++def build_number(best=False):
++ """
++ Return the build number of the current OS distribution, as a string,
++ if provided.
++ Otherwise, the empty string is returned. The build number is the third part
++ of the dot-separated version string.
++
++ For a description of the *best* parameter, see the :func:`distro.version`
++ method.
++ """
++ return _distro.build_number(best)
++
++
++def like():
++ """
++ Return a space-separated list of distro IDs of distributions that are
++ closely related to the current OS distribution in regards to packaging
++ and programming interfaces, for example distributions the current
++ distribution is a derivative from.
++
++ **Lookup hierarchy:**
++
++ This information item is only provided by the os-release file.
++ For details, see the description of the "ID_LIKE" attribute in the
++ `os-release man page
++ <http://www.freedesktop.org/software/systemd/man/os-release.html>`_.
++ """
++ return _distro.like()
++
++
++def codename():
++ """
++ Return the codename for the release of the current OS distribution,
++ as a string.
++
++ If the distribution does not have a codename, an empty string is returned.
++
++ Note that the returned codename is not always really a codename. For
++ example, openSUSE returns "x86_64". This function does not handle such
++ cases in any special way and just returns the string it finds, if any.
++
++ **Lookup hierarchy:**
++
++ * the codename within the "VERSION" attribute of the os-release file, if
++ provided,
++
++ * the value of the "Codename" attribute returned by the lsb_release
++ command,
++
++ * the value of the "<codename>" field of the distro release file.
++ """
++ return _distro.codename()
++
++
++def info(pretty=False, best=False):
++ """
++ Return certain machine-readable information items about the current OS
++ distribution in a dictionary, as shown in the following example:
++
++ .. sourcecode:: python
++
++ {
++ 'id': 'rhel',
++ 'version': '7.0',
++ 'version_parts': {
++ 'major': '7',
++ 'minor': '0',
++ 'build_number': ''
++ },
++ 'like': 'fedora',
++ 'codename': 'Maipo'
++ }
++
++ The dictionary structure and keys are always the same, regardless of which
++ information items are available in the underlying data sources. The values
++ for the various keys are as follows:
++
++ * ``id``: The result of :func:`distro.id`.
++
++ * ``version``: The result of :func:`distro.version`.
++
++ * ``version_parts -> major``: The result of :func:`distro.major_version`.
++
++ * ``version_parts -> minor``: The result of :func:`distro.minor_version`.
++
++ * ``version_parts -> build_number``: The result of
++ :func:`distro.build_number`.
++
++ * ``like``: The result of :func:`distro.like`.
++
++ * ``codename``: The result of :func:`distro.codename`.
++
++ For a description of the *pretty* and *best* parameters, see the
++ :func:`distro.version` method.
++ """
++ return _distro.info(pretty, best)
++
++
++def os_release_info():
++ """
++ Return a dictionary containing key-value pairs for the information items
++ from the os-release file data source of the current OS distribution.
++
++ See `os-release file`_ for details about these information items.
++ """
++ return _distro.os_release_info()
++
++
++def lsb_release_info():
++ """
++ Return a dictionary containing key-value pairs for the information items
++ from the lsb_release command data source of the current OS distribution.
++
++ See `lsb_release command output`_ for details about these information
++ items.
++ """
++ return _distro.lsb_release_info()
++
++
++def distro_release_info():
++ """
++ Return a dictionary containing key-value pairs for the information items
++ from the distro release file data source of the current OS distribution.
++
++ See `distro release file`_ for details about these information items.
++ """
++ return _distro.distro_release_info()
++
++
++def uname_info():
++ """
++ Return a dictionary containing key-value pairs for the information items
++    from the uname command data source of the current OS distribution.
++ """
++ return _distro.uname_info()
++
++
++def os_release_attr(attribute):
++ """
++ Return a single named information item from the os-release file data source
++ of the current OS distribution.
++
++ Parameters:
++
++ * ``attribute`` (string): Key of the information item.
++
++ Returns:
++
++ * (string): Value of the information item, if the item exists.
++ The empty string, if the item does not exist.
++
++ See `os-release file`_ for details about these information items.
++ """
++ return _distro.os_release_attr(attribute)
++
++
++def lsb_release_attr(attribute):
++ """
++ Return a single named information item from the lsb_release command output
++ data source of the current OS distribution.
++
++ Parameters:
++
++ * ``attribute`` (string): Key of the information item.
++
++ Returns:
++
++ * (string): Value of the information item, if the item exists.
++ The empty string, if the item does not exist.
++
++ See `lsb_release command output`_ for details about these information
++ items.
++ """
++ return _distro.lsb_release_attr(attribute)
++
++
++def distro_release_attr(attribute):
++ """
++ Return a single named information item from the distro release file
++ data source of the current OS distribution.
++
++ Parameters:
++
++ * ``attribute`` (string): Key of the information item.
++
++ Returns:
++
++ * (string): Value of the information item, if the item exists.
++ The empty string, if the item does not exist.
++
++ See `distro release file`_ for details about these information items.
++ """
++ return _distro.distro_release_attr(attribute)
++
++
++def uname_attr(attribute):
++ """
++    Return a single named information item from the uname command output
++    data source of the current OS distribution.
++
++ Parameters:
++
++ * ``attribute`` (string): Key of the information item.
++
++ Returns:
++
++ * (string): Value of the information item, if the item exists.
++ The empty string, if the item does not exist.
++ """
++ return _distro.uname_attr(attribute)
++
++
++class cached_property(object):
++ """A version of @property which caches the value. On access, it calls the
++ underlying function and sets the value in `__dict__` so future accesses
++ will not re-call the property.
++ """
++ def __init__(self, f):
++ self._fname = f.__name__
++ self._f = f
++
++ def __get__(self, obj, owner):
++ assert obj is not None, 'call {} on an instance'.format(self._fname)
++ ret = obj.__dict__[self._fname] = self._f(obj)
++ return ret
++
++
++class LinuxDistribution(object):
++ """
++    Provides information about an OS distribution.
++
++ This package creates a private module-global instance of this class with
++ default initialization arguments, that is used by the
++ `consolidated accessor functions`_ and `single source accessor functions`_.
++ By using default initialization arguments, that module-global instance
++ returns data about the current OS distribution (i.e. the distro this
++ package runs on).
++
++ Normally, it is not necessary to create additional instances of this class.
++ However, in situations where control is needed over the exact data sources
++ that are used, instances of this class can be created with a specific
++ distro release file, or a specific os-release file, or without invoking the
++ lsb_release command.
++ """
++
++ def __init__(self,
++ include_lsb=True,
++ os_release_file='',
++ distro_release_file='',
++ include_uname=True):
++ """
++ The initialization method of this class gathers information from the
++ available data sources, and stores that in private instance attributes.
++ Subsequent access to the information items uses these private instance
++ attributes, so that the data sources are read only once.
++
++ Parameters:
++
++ * ``include_lsb`` (bool): Controls whether the
++ `lsb_release command output`_ is included as a data source.
++
++ If the lsb_release command is not available in the program execution
++ path, the data source for the lsb_release command will be empty.
++
++ * ``os_release_file`` (string): The path name of the
++ `os-release file`_ that is to be used as a data source.
++
++ An empty string (the default) will cause the default path name to
++ be used (see `os-release file`_ for details).
++
++ If the specified or defaulted os-release file does not exist, the
++ data source for the os-release file will be empty.
++
++ * ``distro_release_file`` (string): The path name of the
++ `distro release file`_ that is to be used as a data source.
++
++ An empty string (the default) will cause a default search algorithm
++ to be used (see `distro release file`_ for details).
++
++ If the specified distro release file does not exist, or if no default
++ distro release file can be found, the data source for the distro
++ release file will be empty.
++
++        * ``include_uname`` (bool): Controls whether uname command output is
++ included as a data source. If the uname command is not available in
++ the program execution path the data source for the uname command will
++ be empty.
++
++ Public instance attributes:
++
++ * ``os_release_file`` (string): The path name of the
++ `os-release file`_ that is actually used as a data source. The
++ empty string if no distro release file is used as a data source.
++
++ * ``distro_release_file`` (string): The path name of the
++ `distro release file`_ that is actually used as a data source. The
++ empty string if no distro release file is used as a data source.
++
++ * ``include_lsb`` (bool): The result of the ``include_lsb`` parameter.
++ This controls whether the lsb information will be loaded.
++
++ * ``include_uname`` (bool): The result of the ``include_uname``
++ parameter. This controls whether the uname information will
++ be loaded.
++
++ Raises:
++
++ * :py:exc:`IOError`: Some I/O issue with an os-release file or distro
++ release file.
++
++ * :py:exc:`subprocess.CalledProcessError`: The lsb_release command had
++ some issue (other than not being available in the program execution
++ path).
++
++ * :py:exc:`UnicodeError`: A data source has unexpected characters or
++ uses an unexpected encoding.
++ """
++ self.os_release_file = os_release_file or \
++ os.path.join(_UNIXCONFDIR, _OS_RELEASE_BASENAME)
++ self.distro_release_file = distro_release_file or '' # updated later
++ self.include_lsb = include_lsb
++ self.include_uname = include_uname
++
++ def __repr__(self):
++ """Return repr of all info
++ """
++ return \
++ "LinuxDistribution(" \
++ "os_release_file={self.os_release_file!r}, " \
++ "distro_release_file={self.distro_release_file!r}, " \
++ "include_lsb={self.include_lsb!r}, " \
++ "include_uname={self.include_uname!r}, " \
++ "_os_release_info={self._os_release_info!r}, " \
++ "_lsb_release_info={self._lsb_release_info!r}, " \
++ "_distro_release_info={self._distro_release_info!r}, " \
++ "_uname_info={self._uname_info!r})".format(
++ self=self)
++
++ def linux_distribution(self, full_distribution_name=True):
++ """
++ Return information about the OS distribution that is compatible
++ with Python's :func:`platform.linux_distribution`, supporting a subset
++ of its parameters.
++
++ For details, see :func:`distro.linux_distribution`.
++ """
++ return (
++ self.name() if full_distribution_name else self.id(),
++ self.version(),
++ self.codename()
++ )
++
++ def id(self):
++ """Return the distro ID of the OS distribution, as a string.
++
++ For details, see :func:`distro.id`.
++ """
++ def normalize(distro_id, table):
++ distro_id = distro_id.lower().replace(' ', '_')
++ return table.get(distro_id, distro_id)
++
++ distro_id = self.os_release_attr('id')
++ if distro_id:
++ return normalize(distro_id, NORMALIZED_OS_ID)
++
++ distro_id = self.lsb_release_attr('distributor_id')
++ if distro_id:
++ return normalize(distro_id, NORMALIZED_LSB_ID)
++
++ distro_id = self.distro_release_attr('id')
++ if distro_id:
++ return normalize(distro_id, NORMALIZED_DISTRO_ID)
++
++ distro_id = self.uname_attr('id')
++ if distro_id:
++ return normalize(distro_id, NORMALIZED_DISTRO_ID)
++
++ return ''
++
++ def name(self, pretty=False):
++ """
++ Return the name of the OS distribution, as a string.
++
++ For details, see :func:`distro.name`.
++ """
++ name = self.os_release_attr('name') \
++ or self.lsb_release_attr('distributor_id') \
++ or self.distro_release_attr('name') \
++ or self.uname_attr('name')
++ if pretty:
++ name = self.os_release_attr('pretty_name') \
++ or self.lsb_release_attr('description')
++ if not name:
++ name = self.distro_release_attr('name') \
++ or self.uname_attr('name')
++ version = self.version(pretty=True)
++ if version:
++ name = name + ' ' + version
++ return name or ''
++
++ def version(self, pretty=False, best=False):
++ """
++ Return the version of the OS distribution, as a string.
++
++ For details, see :func:`distro.version`.
++ """
++ versions = [
++ self.os_release_attr('version_id'),
++ self.lsb_release_attr('release'),
++ self.distro_release_attr('version_id'),
++ self._parse_distro_release_content(
++ self.os_release_attr('pretty_name')).get('version_id', ''),
++ self._parse_distro_release_content(
++ self.lsb_release_attr('description')).get('version_id', ''),
++ self.uname_attr('release')
++ ]
++ version = ''
++ if best:
++ # This algorithm uses the last version in priority order that has
++ # the best precision. If the versions are not in conflict, that
++ # does not matter; otherwise, using the last one instead of the
++ # first one might be considered a surprise.
++ for v in versions:
++ if v.count(".") > version.count(".") or version == '':
++ version = v
++ else:
++ for v in versions:
++ if v != '':
++ version = v
++ break
++ if pretty and version and self.codename():
++ version = u'{0} ({1})'.format(version, self.codename())
++ return version
++
++ def version_parts(self, best=False):
++ """
++ Return the version of the OS distribution, as a tuple of version
++ numbers.
++
++ For details, see :func:`distro.version_parts`.
++ """
++ version_str = self.version(best=best)
++ if version_str:
++ version_regex = re.compile(r'(\d+)\.?(\d+)?\.?(\d+)?')
++ matches = version_regex.match(version_str)
++ if matches:
++ major, minor, build_number = matches.groups()
++ return major, minor or '', build_number or ''
++ return '', '', ''
++
++ def major_version(self, best=False):
++ """
++ Return the major version number of the current distribution.
++
++ For details, see :func:`distro.major_version`.
++ """
++ return self.version_parts(best)[0]
++
++ def minor_version(self, best=False):
++ """
++ Return the minor version number of the current distribution.
++
++ For details, see :func:`distro.minor_version`.
++ """
++ return self.version_parts(best)[1]
++
++ def build_number(self, best=False):
++ """
++ Return the build number of the current distribution.
++
++ For details, see :func:`distro.build_number`.
++ """
++ return self.version_parts(best)[2]
++
++ def like(self):
++ """
++ Return the IDs of distributions that are like the OS distribution.
++
++ For details, see :func:`distro.like`.
++ """
++ return self.os_release_attr('id_like') or ''
++
++ def codename(self):
++ """
++ Return the codename of the OS distribution.
++
++ For details, see :func:`distro.codename`.
++ """
++ try:
++ # Handle os_release specially since distros might purposefully set
++ # this to empty string to have no codename
++ return self._os_release_info['codename']
++ except KeyError:
++ return self.lsb_release_attr('codename') \
++ or self.distro_release_attr('codename') \
++ or ''
++
++ def info(self, pretty=False, best=False):
++ """
++ Return certain machine-readable information about the OS
++ distribution.
++
++ For details, see :func:`distro.info`.
++ """
++ return dict(
++ id=self.id(),
++ version=self.version(pretty, best),
++ version_parts=dict(
++ major=self.major_version(best),
++ minor=self.minor_version(best),
++ build_number=self.build_number(best)
++ ),
++ like=self.like(),
++ codename=self.codename(),
++ )
++
++ def os_release_info(self):
++ """
++ Return a dictionary containing key-value pairs for the information
++ items from the os-release file data source of the OS distribution.
++
++ For details, see :func:`distro.os_release_info`.
++ """
++ return self._os_release_info
++
++ def lsb_release_info(self):
++ """
++ Return a dictionary containing key-value pairs for the information
++ items from the lsb_release command data source of the OS
++ distribution.
++
++ For details, see :func:`distro.lsb_release_info`.
++ """
++ return self._lsb_release_info
++
++ def distro_release_info(self):
++ """
++ Return a dictionary containing key-value pairs for the information
++ items from the distro release file data source of the OS
++ distribution.
++
++ For details, see :func:`distro.distro_release_info`.
++ """
++ return self._distro_release_info
++
++ def uname_info(self):
++ """
++ Return a dictionary containing key-value pairs for the information
++ items from the uname command data source of the OS distribution.
++
++ For details, see :func:`distro.uname_info`.
++ """
++ return self._uname_info
++
++ def os_release_attr(self, attribute):
++ """
++ Return a single named information item from the os-release file data
++ source of the OS distribution.
++
++ For details, see :func:`distro.os_release_attr`.
++ """
++ return self._os_release_info.get(attribute, '')
++
++ def lsb_release_attr(self, attribute):
++ """
++ Return a single named information item from the lsb_release command
++ output data source of the OS distribution.
++
++ For details, see :func:`distro.lsb_release_attr`.
++ """
++ return self._lsb_release_info.get(attribute, '')
++
++ def distro_release_attr(self, attribute):
++ """
++ Return a single named information item from the distro release file
++ data source of the OS distribution.
++
++ For details, see :func:`distro.distro_release_attr`.
++ """
++ return self._distro_release_info.get(attribute, '')
++
++ def uname_attr(self, attribute):
++ """
++ Return a single named information item from the uname command
++ output data source of the OS distribution.
++
++        For details, see :func:`distro.uname_attr`.
++ """
++ return self._uname_info.get(attribute, '')
++
++ @cached_property
++ def _os_release_info(self):
++ """
++ Get the information items from the specified os-release file.
++
++ Returns:
++ A dictionary containing all information items.
++ """
++ if os.path.isfile(self.os_release_file):
++ with open(self.os_release_file) as release_file:
++ return self._parse_os_release_content(release_file)
++ return {}
++
++ @staticmethod
++ def _parse_os_release_content(lines):
++ """
++ Parse the lines of an os-release file.
++
++ Parameters:
++
++ * lines: Iterable through the lines in the os-release file.
++ Each line must be a unicode string or a UTF-8 encoded byte
++ string.
++
++ Returns:
++ A dictionary containing all information items.
++ """
++ props = {}
++ lexer = shlex.shlex(lines, posix=True)
++ lexer.whitespace_split = True
++
++ # The shlex module defines its `wordchars` variable using literals,
++ # making it dependent on the encoding of the Python source file.
++ # In Python 2.6 and 2.7, the shlex source file is encoded in
++ # 'iso-8859-1', and the `wordchars` variable is defined as a byte
++ # string. This causes a UnicodeDecodeError to be raised when the
++ # parsed content is a unicode object. The following fix resolves that
++ # (... but it should be fixed in shlex...):
++ if sys.version_info[0] == 2 and isinstance(lexer.wordchars, bytes):
++ lexer.wordchars = lexer.wordchars.decode('iso-8859-1')
++
++ tokens = list(lexer)
++ for token in tokens:
++ # At this point, all shell-like parsing has been done (i.e.
++ # comments processed, quotes and backslash escape sequences
++ # processed, multi-line values assembled, trailing newlines
++ # stripped, etc.), so the tokens are now either:
++ # * variable assignments: var=value
++ # * commands or their arguments (not allowed in os-release)
++ if '=' in token:
++ k, v = token.split('=', 1)
++ if isinstance(v, bytes):
++ v = v.decode('utf-8')
++ props[k.lower()] = v
++ else:
++ # Ignore any tokens that are not variable assignments
++ pass
++
++ if 'version_codename' in props:
++ # os-release added a version_codename field. Use that in
++            # preference to anything else. Note that some distros purposefully
++ # do not have code names. They should be setting
++ # version_codename=""
++ props['codename'] = props['version_codename']
++ elif 'ubuntu_codename' in props:
++ # Same as above but a non-standard field name used on older Ubuntus
++ props['codename'] = props['ubuntu_codename']
++ elif 'version' in props:
++ # If there is no version_codename, parse it from the version
++ codename = re.search(r'(\(\D+\))|,(\s+)?\D+', props['version'])
++ if codename:
++ codename = codename.group()
++ codename = codename.strip('()')
++ codename = codename.strip(',')
++ codename = codename.strip()
++                # codename appears within parentheses.
++ props['codename'] = codename
++
++ return props
++
++ @cached_property
++ def _lsb_release_info(self):
++ """
++ Get the information items from the lsb_release command output.
++
++ Returns:
++ A dictionary containing all information items.
++ """
++ if not self.include_lsb:
++ return {}
++ with open(os.devnull, 'w') as devnull:
++ try:
++ cmd = ('lsb_release', '-a')
++ stdout = subprocess.check_output(cmd, stderr=devnull)
++ except OSError: # Command not found
++ return {}
++ content = stdout.decode(sys.getfilesystemencoding()).splitlines()
++ return self._parse_lsb_release_content(content)
++
++ @staticmethod
++ def _parse_lsb_release_content(lines):
++ """
++ Parse the output of the lsb_release command.
++
++ Parameters:
++
++ * lines: Iterable through the lines of the lsb_release output.
++ Each line must be a unicode string or a UTF-8 encoded byte
++ string.
++
++ Returns:
++ A dictionary containing all information items.
++ """
++ props = {}
++ for line in lines:
++ kv = line.strip('\n').split(':', 1)
++ if len(kv) != 2:
++ # Ignore lines without colon.
++ continue
++ k, v = kv
++ props.update({k.replace(' ', '_').lower(): v.strip()})
++ return props
++
++ @cached_property
++ def _uname_info(self):
++ with open(os.devnull, 'w') as devnull:
++ try:
++ cmd = ('uname', '-rs')
++ stdout = subprocess.check_output(cmd, stderr=devnull)
++ except OSError:
++ return {}
++ content = stdout.decode(sys.getfilesystemencoding()).splitlines()
++ return self._parse_uname_content(content)
++
++ @staticmethod
++ def _parse_uname_content(lines):
++ props = {}
++ match = re.search(r'^([^\s]+)\s+([\d\.]+)', lines[0].strip())
++ if match:
++ name, version = match.groups()
++
++ # This is to prevent the Linux kernel version from
++ # appearing as the 'best' version on otherwise
++ # identifiable distributions.
++ if name == 'Linux':
++ return {}
++ props['id'] = name.lower()
++ props['name'] = name
++ props['release'] = version
++ return props
++
++ @cached_property
++ def _distro_release_info(self):
++ """
++ Get the information items from the specified distro release file.
++
++ Returns:
++ A dictionary containing all information items.
++ """
++ if self.distro_release_file:
++ # If it was specified, we use it and parse what we can, even if
++ # its file name or content does not match the expected pattern.
++ distro_info = self._parse_distro_release_file(
++ self.distro_release_file)
++ basename = os.path.basename(self.distro_release_file)
++ # The file name pattern for user-specified distro release files
++ # is somewhat more tolerant (compared to when searching for the
++ # file), because we want to use what was specified as best as
++ # possible.
++ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
++ if 'name' in distro_info \
++ and 'cloudlinux' in distro_info['name'].lower():
++ distro_info['id'] = 'cloudlinux'
++ elif match:
++ distro_info['id'] = match.group(1)
++ return distro_info
++ else:
++ try:
++ basenames = os.listdir(_UNIXCONFDIR)
++ # We sort for repeatability in cases where there are multiple
++ # distro specific files; e.g. CentOS, Oracle, Enterprise all
++ # containing `redhat-release` on top of their own.
++ basenames.sort()
++ except OSError:
++ # This may occur when /etc is not readable but we can't be
++ # sure about the *-release files. Check common entries of
++ # /etc for information. If they turn out to not be there the
++ # error is handled in `_parse_distro_release_file()`.
++ basenames = ['SuSE-release',
++ 'arch-release',
++ 'base-release',
++ 'centos-release',
++ 'fedora-release',
++ 'gentoo-release',
++ 'mageia-release',
++ 'mandrake-release',
++ 'mandriva-release',
++ 'mandrivalinux-release',
++ 'manjaro-release',
++ 'oracle-release',
++ 'redhat-release',
++ 'sl-release',
++ 'slackware-version']
++ for basename in basenames:
++ if basename in _DISTRO_RELEASE_IGNORE_BASENAMES:
++ continue
++ match = _DISTRO_RELEASE_BASENAME_PATTERN.match(basename)
++ if match:
++ filepath = os.path.join(_UNIXCONFDIR, basename)
++ distro_info = self._parse_distro_release_file(filepath)
++ if 'name' in distro_info:
++ # The name is always present if the pattern matches
++ self.distro_release_file = filepath
++ distro_info['id'] = match.group(1)
++ if 'cloudlinux' in distro_info['name'].lower():
++ distro_info['id'] = 'cloudlinux'
++ return distro_info
++ return {}
++
++ def _parse_distro_release_file(self, filepath):
++ """
++ Parse a distro release file.
++
++ Parameters:
++
++ * filepath: Path name of the distro release file.
++
++ Returns:
++ A dictionary containing all information items.
++ """
++ try:
++ with open(filepath) as fp:
++ # Only parse the first line. For instance, on SLES there
++ # are multiple lines. We don't want them...
++ return self._parse_distro_release_content(fp.readline())
++ except (OSError, IOError):
++ # Ignore not being able to read a specific, seemingly version
++ # related file.
++ # See https://github.com/nir0s/distro/issues/162
++ return {}
++
++ @staticmethod
++ def _parse_distro_release_content(line):
++ """
++ Parse a line from a distro release file.
++
++ Parameters:
++ * line: Line from the distro release file. Must be a unicode string
++ or a UTF-8 encoded byte string.
++
++ Returns:
++ A dictionary containing all information items.
++ """
++ if isinstance(line, bytes):
++ line = line.decode('utf-8')
++ matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(
++ line.strip()[::-1])
++ distro_info = {}
++ if matches:
++ # regexp ensures non-None
++ distro_info['name'] = matches.group(3)[::-1]
++ if matches.group(2):
++ distro_info['version_id'] = matches.group(2)[::-1]
++ if matches.group(1):
++ distro_info['codename'] = matches.group(1)[::-1]
++ elif line:
++ distro_info['name'] = line.strip()
++ return distro_info
++
++
++_distro = LinuxDistribution()
++
++
++def main():
++ logger = logging.getLogger(__name__)
++ logger.setLevel(logging.DEBUG)
++ logger.addHandler(logging.StreamHandler(sys.stdout))
++
++ parser = argparse.ArgumentParser(description="OS distro info tool")
++ parser.add_argument(
++ '--json',
++ '-j',
++ help="Output in machine readable format",
++ action="store_true")
++ args = parser.parse_args()
++
++ if args.json:
++ logger.info(json.dumps(info(), indent=4, sort_keys=True))
++ else:
++ logger.info('Name: %s', name(pretty=True))
++ distribution_version = version(pretty=True)
++ logger.info('Version: %s', distribution_version)
++ distribution_codename = codename()
++ logger.info('Codename: %s', distribution_codename)
++
++
++if __name__ == '__main__':
++ main()
+diff --git a/third_party/python/distro/docs/conf.py b/third_party/python/distro/docs/conf.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/docs/conf.py
+@@ -0,0 +1,342 @@
++# -*- coding: utf-8 -*-
++#
++# Configuration file for Sphinx builds, created by
++# sphinx-quickstart on Wed Mar 2 11:33:06 2016.
++#
++# This file is execfile()d with the current directory set to its
++# containing dir.
++#
++# Note that not all possible configuration values are present in this
++# autogenerated file.
++#
++# All configuration values have a default; values that are commented out
++# serve to show the default.
++
++import sys
++import os
++import re
++
++# If extensions (or modules to document with autodoc) are in another directory,
++# add these directories to sys.path here. If the directory is relative to the
++# documentation root, use os.path.abspath to make it absolute, like shown here.
++sys.path.insert(0, os.path.abspath('..'))
++
++# -- General configuration ------------------------------------------------
++
++# If your documentation needs a minimal Sphinx version, state it here.
++needs_sphinx = '1.1'
++
++# Add any Sphinx extension module names here, as strings. They can be
++# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
++# ones.
++extensions = [
++ 'sphinx.ext.autodoc',
++ 'sphinx.ext.intersphinx',
++ 'sphinx.ext.todo',
++ 'sphinx.ext.coverage',
++ 'sphinx.ext.viewcode',
++]
++
++# Add any paths that contain templates here, relative to this directory.
++templates_path = ['_templates']
++
++# The suffix(es) of source filenames.
++# You can specify multiple suffix as a list of string:
++# source_suffix = ['.rst', '.md']
++source_suffix = '.rst'
++
++# The encoding of source files.
++source_encoding = 'utf-8'
++
++# The master toctree document.
++on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
++if on_rtd:
++ master_doc = 'index'
++else:
++ master_doc = 'docs/index'
++
++# General information about the project.
++project = u'distro'
++copyright = u'2015,2016, Nir Cohen, Andreas Maier'
++author = u'Nir Cohen, Andreas Maier'
++
++# The short description of the package.
++_short_description = u'Linux Distribution - a Linux OS platform information API'
++
++# The version info for the project you're documenting, acts as replacement for
++# |version| and |release|, also used in various other places throughout the
++# built documents.
++
++def parse_version():
++ with open('../setup.py', 'r') as _fp:
++ _lines = _fp.readlines()
++ for _line in _lines:
++ m = re.match(r'^package_version *= *[\'"](.+)[\'"].*$', _line)
++ if m:
++ break
++ if m:
++ return m.group(1)
++ else:
++ return 'unknown'
++
++# The short X.Y version.
++# Note: We use the full version in both cases.
++version = parse_version()
++
++# The full version, including alpha/beta/rc tags.
++release = version
++
++# The language for content autogenerated by Sphinx. Refer to documentation
++# for a list of supported languages.
++#
++# This is also used if you do content translation via gettext catalogs.
++# Usually you set "language" from the command line for these cases.
++language = None
++
++# There are two options for replacing |today|: either, you set today to some
++# non-false value, then it is used:
++#today = ''
++# Else, today_fmt is used as the format for a strftime call.
++#today_fmt = '%B %d, %Y'
++
++# List of patterns, relative to source directory, that match files and
++# directories to ignore when looking for source files.
++exclude_patterns = ["tests", ".tox", ".git", "build_docs", "ld.egg-info"]
++
++# The reST default role (used for this markup: `text`) to use for all
++# documents.
++#default_role = None
++
++# If true, '()' will be appended to :func: etc. cross-reference text.
++add_function_parentheses = True
++
++# If true, the current module name will be prepended to all description
++# unit titles (such as .. function::).
++#add_module_names = True
++
++# If true, sectionauthor and moduleauthor directives will be shown in the
++# output. They are ignored by default.
++#show_authors = False
++
++# The name of the Pygments (syntax highlighting) style to use.
++pygments_style = 'sphinx'
++
++# A list of ignored prefixes for module index sorting.
++#modindex_common_prefix = []
++
++# If true, keep warnings as "system message" paragraphs in the built documents.
++#keep_warnings = False
++
++# If true, `todo` and `todoList` produce output, else they produce nothing.
++todo_include_todos = True
++
++
++# -- Options for HTML output ----------------------------------------------
++
++# The theme to use for HTML and HTML Help pages.
++# See http://www.sphinx-doc.org/en/stable/theming.html for built-in themes.
++html_theme = "classic"
++
++# Theme options are theme-specific and customize the look and feel of a theme
++# further.
++# See http://www.sphinx-doc.org/en/stable/theming.html for the options
++# available for built-in themes.
++html_theme_options = {
++}
++
++# Add any paths that contain custom themes here, relative to this directory.
++#html_theme_path = []
++
++# The name for this set of Sphinx documents. If not defined, it defaults to
++# "<project> v<release> documentation".
++#html_title = None
++
++# A shorter title for the navigation bar. Default is the same as html_title.
++#html_short_title = 'distro'
++
++# The name of an image file (relative to this directory) to place at the top
++# of the sidebar.
++#html_logo = None
++
++# The name of an image file (relative to this directory) to use as a favicon of
++# the docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
++# pixels large.
++#html_favicon = None
++
++# Add any paths that contain custom static files (such as style sheets) here,
++# relative to this directory. They are copied after the builtin static files,
++# so a file named "default.css" will overwrite the builtin "default.css".
++html_static_path = ['html_static']
++
++# Add any extra paths that contain custom files (such as robots.txt or
++# .htaccess) here, relative to this directory. These files are copied
++# directly to the root of the documentation.
++html_extra_path = ['html_extra']
++
++# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
++# using the given strftime format.
++#html_last_updated_fmt = '%b %d, %Y'
++
++# If true, SmartyPants will be used to convert quotes and dashes to
++# typographically correct entities.
++#html_use_smartypants = True
++
++# Custom sidebar templates, maps document names to template names.
++#html_sidebars = {}
++
++# Additional templates that should be rendered to pages, maps page names to
++# template names.
++#html_additional_pages = {}
++
++# If false, no module index is generated.
++#html_domain_indices = True
++
++# If false, no index is generated.
++#html_use_index = True
++
++# If true, the index is split into individual pages for each letter.
++#html_split_index = False
++
++# If true, links to the reST sources are added to the pages.
++#html_show_sourcelink = True
++
++# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
++#html_show_sphinx = True
++
++# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
++#html_show_copyright = True
++
++# If true, an OpenSearch description file will be output, and all pages will
++# contain a <link> tag referring to it. The value of this option must be the
++# base URL from which the finished HTML is served.
++#html_use_opensearch = ''
++
++# This is the file name suffix for HTML files (e.g. ".xhtml").
++#html_file_suffix = None
++
++# Language to be used for generating the HTML full-text search index.
++# Sphinx supports the following languages:
++# 'da', 'de', 'en', 'es', 'fi', 'fr', 'hu', 'it', 'ja'
++# 'nl', 'no', 'pt', 'ro', 'ru', 'sv', 'tr'
++#html_search_language = 'en'
++
++# A dictionary with options for the search language support, empty by default.
++# Now only 'ja' uses this config value
++#html_search_options = {'type': 'default'}
++
++# The name of a javascript file (relative to the configuration directory) that
++# implements a search results scorer. If empty, the default will be used.
++#html_search_scorer = 'scorer.js'
++
++# Output file base name for HTML help builder.
++htmlhelp_basename = 'distro_doc'
++
++# -- Options for LaTeX output ---------------------------------------------
++
++latex_elements = {
++# The paper size ('letterpaper' or 'a4paper').
++#'papersize': 'letterpaper',
++
++# The font size ('10pt', '11pt' or '12pt').
++#'pointsize': '10pt',
++
++# Additional stuff for the LaTeX preamble.
++#'preamble': '',
++
++# Latex figure (float) alignment
++#'figure_align': 'htbp',
++}
++
++# Grouping the document tree into LaTeX files. List of tuples
++# (source start file, target name, title,
++# author, documentclass [howto, manual, or own class]).
++latex_documents = [
++ (master_doc, 'ld.tex', _short_description, author, 'manual'),
++]
++
++# The name of an image file (relative to this directory) to place at the top of
++# the title page.
++#latex_logo = None
++
++# For "manual" documents, if this is true, then toplevel headings are parts,
++# not chapters.
++#latex_use_parts = False
++
++# If true, show page references after internal links.
++#latex_show_pagerefs = False
++
++# If true, show URL addresses after external links.
++#latex_show_urls = False
++
++# Documents to append as an appendix to all manuals.
++#latex_appendices = []
++
++# If false, no module index is generated.
++#latex_domain_indices = True
++
++
++# -- Options for manual page output ---------------------------------------
++
++# One entry per manual page. List of tuples
++# (source start file, name, description, authors, manual section).
++man_pages = [
++ (master_doc, 'ld', _short_description, [author], 1)
++]
++
++# If true, show URL addresses after external links.
++#man_show_urls = False
++
++
++# -- Options for Texinfo output -------------------------------------------
++
++# Grouping the document tree into Texinfo files. List of tuples
++# (source start file, target name, title, author,
++# dir menu entry, description, category)
++texinfo_documents = [
++ (master_doc, 'LinuxDistribution', _short_description,
++ author, 'LinuxDistribution', _short_description,
++ 'Miscellaneous'),
++]
++
++# Documents to append as an appendix to all manuals.
++#texinfo_appendices = []
++
++# If false, no module index is generated.
++#texinfo_domain_indices = True
++
++# How to display URL addresses: 'footnote', 'no', or 'inline'.
++#texinfo_show_urls = 'footnote'
++
++# If true, do not generate a @detailmenu in the "Top" node's menu.
++#texinfo_no_detailmenu = False
++
++
++# -- Options for autodoc extension ----------------------------------------
++# For documentation, see
++# http://www.sphinx-doc.org/en/stable/ext/autodoc.html
++
++# Selects what content will be inserted into a class description.
++# The possible values are:
++# "class" - Only the class’ docstring is inserted. This is the default.
++# "both" - Both the class’ and the __init__ method’s docstring are
++# concatenated and inserted.
++# "init" - Only the __init__ method’s docstring is inserted.
++autoclass_content = "both"
++
++# Selects if automatically documented members are sorted alphabetically
++# (value 'alphabetical'), by member type (value 'groupwise') or by source
++# order (value 'bysource'). The default is alphabetical.
++autodoc_member_order = "bysource"
++
++# -- Options for intersphinx extension ------------------------------------
++# For documentation, see
++# http://www.sphinx-doc.org/en/stable/ext/intersphinx.html
++
++# Defines the prefixes for intersphinx links, and the targets they resolve
++# to. Example RST source for 'py' prefix:
++# :py:func:`platform.dist`
++intersphinx_mapping = {
++ 'py': ('https://docs.python.org/3.5', None)
++}
++
++intersphinx_cache_limit = 5
+diff --git a/third_party/python/distro/docs/index.rst b/third_party/python/distro/docs/index.rst
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/docs/index.rst
+@@ -0,0 +1,476 @@
++
++.. _distro official repo: https://github.com/nir0s/distro
++.. _distro issue tracker: https://github.com/nir0s/distro/issues
++.. _open issues on missing test data: https://github.com/nir0s/distro/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22>
++
++
++**distro** package (Linux Distribution) version |version|
++*********************************************************
++
++Official distro repository: `distro official repo`_
++
++Overview and motivation
++=======================
++
++.. automodule:: distro
++
++If you want to jump into the API description right away, read about the
++`consolidated accessor functions`_.
++
++Compatibility
++=============
++
++The ``distro`` package is supported on Python 2.7, 3.4+ and PyPy, and on
++any Linux or *BSD distribution that provides one or more of the `data sources`_
++used by this package.
++
++This package is tested on Python 2.7, 3.4+ and PyPy, with test data that
++mimics the exact behavior of the data sources of
++`a number of Linux distributions <https://github.com/nir0s/distro/tree/master/tests/resources/distros>`_.
++
++If you want to add test data for more distributions, please
++create an issue in the `distro issue tracker`_
++and provide the following information in the issue:
++
++* The content of the `/etc/os-release` file, if any.
++* The file names and content of the `/etc/*release` and `/etc/*version` files, if any.
++* The output of the command: `lsb_release -a`, if available.
++* The file names and content of any other files you are aware of that provide
++ useful information about the distro.
++
++There are already some `open issues on missing test data`_.
++
++
++Data sources
++============
++
++The ``distro`` package implements a robust and inclusive way of retrieving the
++information about a Linux distribution based on new standards and old methods,
++namely from these data sources:
++
++* The `os-release file`_, if present.
++
++* The `lsb_release command output`_, if the lsb_release command is available.
++
++* The `distro release file`_, if present.
++
++* The `uname command output`_, if present.
++
++
++Access to the information
++=========================
++
++This package provides three ways to access the information about a Linux
++distribution:
++
++* `Consolidated accessor functions`_
++
++ These are module-global functions that take into account all data sources in
++ a priority order, and that return information about the current Linux
++ distribution.
++
++ These functions should be the normal way to access the information.
++
++ The precedence of data sources is applied for each information item
++ separately. Therefore, it is possible that not all information items returned
++ by these functions come from the same data source. For example, on a
++ distribution that has an lsb_release command that returns the
++ "Distributor ID" field but not the "Codename" field, and that has a distro
++ release file that specifies a codename inside, the distro ID will come from
++ the lsb_release command (because it has higher precedence), and the codename
++ will come from the distro release file (because it is not provided by the
++ lsb_release command).
++
++ Examples: :func:`distro.id` for retrieving
++ the distro ID, or :func:`ld.info` to get the machine-readable part of the
++ information in a more aggregated way, or :func:`distro.linux_distribution` with
++ an interface that is compatible to the original
++ :py:func:`platform.linux_distribution` function, supporting a subset of its
++ parameters.
++
++* `Single source accessor functions`_
++
++ These are module-global functions that take into account a single data
++ source, and that return information about the current Linux distribution.
++
++ They are useful for distributions that provide multiple inconsistent data
++ sources, or for retrieving information items that are not provided by the
++ consolidated accessor functions.
++
++ Examples: :func:`distro.os_release_attr` for retrieving a single information
++ item from the os-release data source, or :func:`distro.lsb_release_info` for
++ retrieving all information items from the lsb_release command output data
++ source.
++
++* `LinuxDistribution class`_
++
++ The :class:`distro.LinuxDistribution` class provides the main code of this
++ package.
++
++ This package contains a private module-global :class:`distro.LinuxDistribution`
++ instance with default initialization arguments, that is used by the
++ consolidated and single source accessor functions.
++
++ A user-defined instance of the :class:`distro.LinuxDistribution` class allows
++ specifying the path names of the os-release file and distro release file and
++ whether the lsb_release command should be used or not. That is useful for
++ example when the distribution information from a chrooted environment
++ is to be retrieved, or when a distro has multiple distro release files and
++ the default algorithm uses the wrong one.
++
++
++Consolidated accessor functions
++===============================
++
++This section describes the consolidated accessor functions.
++See `access to the information`_ for a discussion of the different kinds of
++accessor functions.
++
++.. autofunction:: distro.linux_distribution
++.. autofunction:: distro.id
++.. autofunction:: distro.name
++.. autofunction:: distro.version
++.. autofunction:: distro.version_parts
++.. autofunction:: distro.major_version
++.. autofunction:: distro.minor_version
++.. autofunction:: distro.build_number
++.. autofunction:: distro.like
++.. autofunction:: distro.codename
++.. autofunction:: distro.info
++
++Single source accessor functions
++================================
++
++This section describes the single source accessor functions.
++See `access to the information`_ for a discussion of the different kinds of
++accessor functions.
++
++.. autofunction:: distro.os_release_info
++.. autofunction:: distro.lsb_release_info
++.. autofunction:: distro.distro_release_info
++.. autofunction:: distro.os_release_attr
++.. autofunction:: distro.lsb_release_attr
++.. autofunction:: distro.distro_release_attr
++
++LinuxDistribution class
++=======================
++
++This section describes the access via the :class:`distro.LinuxDistribution` class.
++See `access to the information`_ for a discussion of the different kinds of
++accessor functions.
++
++.. autoclass:: distro.LinuxDistribution
++ :members:
++ :undoc-members:
++
++Normalization tables
++====================
++
++These translation tables are used to normalize the parsed distro ID values
++into reliable IDs. See :func:`distro.id` for details.
++
++They are documented in order to show for which distros a normalization is
++currently defined.
++
++As a quick fix, these tables can also be extended by the user by appending new
++entries, should the need arise. If you have a need to get these tables
++extended, please make an according request in the `distro issue tracker`_.
++
++.. autodata:: distro.NORMALIZED_OS_ID
++.. autodata:: distro.NORMALIZED_LSB_ID
++.. autodata:: distro.NORMALIZED_DISTRO_ID
++
++Os-release file
++===============
++
++The os-release file is looked up using the path name ``/etc/os-release``. Its
++optional additional location ``/usr/lib/os-release`` is ignored.
++
++The os-release file is expected to be encoded in UTF-8.
++
++It is parsed using the standard Python :py:mod:`shlex` package, which treats it
++like a shell script.
++
++The attribute names found in the file are translated to lower case and then
++become the keys of the information items from the os-release file data source.
++These keys can be used to retrieve single items with the
++:func:`distro.os_release_attr` function, and they are also used as keys in the
++dictionary returned by :func:`distro.os_release_info`.
++
++The attribute values found in the file are processed using shell rules (e.g.
++for whitespace, escaping, and quoting) before they become the values of the
++information items from the os-release file data source.
++
++If the attribute "VERSION" is found in the file, the distro codename is
++extracted from its value if it can be found there. If a codename is found, it
++becomes an additional information item with key "codename".
++
++See the `os-release man page
++<http://www.freedesktop.org/software/systemd/man/os-release.html>`_
++for a list of possible attributes in the file.
++
++**Examples:**
++
++1. The following os-release file content:
++
++ .. sourcecode:: shell
++
++ NAME='Ubuntu'
++ VERSION="14.04.3 LTS, Trusty Tahr"
++ ID=ubuntu
++ ID_LIKE=debian
++ PRETTY_NAME="Ubuntu 14.04.3 LTS"
++ VERSION_ID="14.04"
++ HOME_URL="http://www.ubuntu.com/"
++ SUPPORT_URL="http://help.ubuntu.com/"
++ BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
++
++ results in these information items:
++
++ =============================== ==========================================
++ Key Value
++ =============================== ==========================================
++ name "Ubuntu"
++ version "14.04.3 LTS, Trusty Tahr"
++ id "ubuntu"
++ id_like "debian"
++ pretty_name "Ubuntu 14.04.3 LTS"
++ version_id "14.04"
++ home_url "http://www.ubuntu.com/"
++ support_url "http://help.ubuntu.com/"
++ bug_report_url "http://bugs.launchpad.net/ubuntu/"
++ codename "Trusty Tahr"
++ =============================== ==========================================
++
++2. The following os-release file content:
++
++ .. sourcecode:: shell
++
++ NAME="Red Hat Enterprise Linux Server"
++ VERSION="7.0 (Maipo)"
++ ID="rhel"
++ ID_LIKE="fedora"
++ VERSION_ID="7.0"
++ PRETTY_NAME="Red Hat Enterprise Linux Server 7.0 (Maipo)"
++ ANSI_COLOR="0;31"
++ CPE_NAME="cpe:/o:redhat:enterprise_linux:7.0:GA:server"
++ HOME_URL="https://www.redhat.com/"
++ BUG_REPORT_URL="https://bugzilla.redhat.com/"
++
++ REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
++ REDHAT_BUGZILLA_PRODUCT_VERSION=7.0
++ REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
++ REDHAT_SUPPORT_PRODUCT_VERSION=7.0
++
++ results in these information items:
++
++ =============================== ==========================================
++ Key Value
++ =============================== ==========================================
++ name "Red Hat Enterprise Linux Server"
++ version "7.0 (Maipo)"
++ id "rhel"
++ id_like "fedora"
++ version_id "7.0"
++ pretty_name "Red Hat Enterprise Linux Server 7.0 (Maipo)"
++ ansi_color "0;31"
++ cpe_name "cpe:/o:redhat:enterprise_linux:7.0:GA:server"
++ home_url "https://www.redhat.com/"
++ bug_report_url "https://bugzilla.redhat.com/"
++ redhat_bugzilla_product "Red Hat Enterprise Linux 7"
++ redhat_bugzilla_product_version "7.0"
++ redhat_support_product "Red Hat Enterprise Linux"
++ redhat_support_product_version "7.0"
++ codename "Maipo"
++ =============================== ==========================================
++
++Lsb_release command output
++==========================
++
++The lsb_release command is expected to be in the PATH, and is invoked as
++follows:
++
++.. sourcecode:: shell
++
++ lsb_release -a
++
++The command output is expected to be encoded in UTF-8.
++
++Only lines in the command output with the following format will be used:
++
++ ``<attr-name>: <attr-value>``
++
++Where:
++
++* ``<attr-name>`` is the name of the attribute, and
++* ``<attr-value>`` is the attribute value.
++
++The attribute names are stripped from surrounding blanks, any remaining blanks
++are translated to underscores, they are translated to lower case, and then
++become the keys of the information items from the lsb_release command output
++data source.
++
++The attribute values are stripped from surrounding blanks, and then become the
++values of the information items from the lsb_release command output data
++source.
++
++See the `lsb_release man page
++<http://refspecs.linuxfoundation.org/LSB_5.0.0/LSB-Core-generic/
++LSB-Core-generic/lsbrelease.html>`_
++for a description of standard attributes returned by the lsb_release command.
++
++**Examples:**
++
++1. The following lsb_release command output:
++
++ .. sourcecode:: text
++
++ No LSB modules are available.
++ Distributor ID: Ubuntu
++ Description: Ubuntu 14.04.3 LTS
++ Release: 14.04
++ Codename: trusty
++
++ results in these information items:
++
++ =============================== ==========================================
++ Key Value
++ =============================== ==========================================
++ distributor_id "Ubuntu"
++ description "Ubuntu 14.04.3 LTS"
++ release "14.04"
++ codename "trusty"
++ =============================== ==========================================
++
++2. The following lsb_release command output:
++
++ .. sourcecode:: text
++
++ LSB Version: n/a
++ Distributor ID: SUSE LINUX
++ Description: SUSE Linux Enterprise Server 12 SP1
++ Release: 12.1
++ Codename: n/a
++
++ results in these information items:
++
++ =============================== ==========================================
++ Key Value
++ =============================== ==========================================
++ lsb_version "n/a"
++ distributor_id "SUSE LINUX"
++ description "SUSE Linux Enterprise Server 12 SP1"
++ release "12.1"
++ codename "n/a"
++ =============================== ==========================================
++
++Distro release file
++===================
++
++Unless specified with a particular path name when using the
++:class:`distro.LinuxDistribution` class, the distro release file is found by using
++the first match in the alphabetically sorted list of the files matching the
++following path name patterns:
++
++* ``/etc/*-release``
++* ``/etc/*_release``
++* ``/etc/*-version``
++* ``/etc/*_version``
++
++where the following special path names are excluded:
++
++* ``/etc/debian_version``
++* ``/etc/system-release``
++* ``/etc/os-release``
++
++and where the first line within the file has the expected format.
++
++The algorithm to sort the files alphabetically is far from perfect, but the
++distro release file has the least priority as a data source, and it is expected
++that distributions provide one of the other data sources.
++
++The distro release file is expected to be encoded in UTF-8.
++
++Only its first line is used, and it is expected to have the following format:
++
++ ``<name> [[[release] <version_id>] (<codename>)]``
++
++Where:
++
++* square brackets indicate optionality,
++* ``<name>`` is the distro name,
++* ``<version_id>`` is the distro version, and
++* ``<codename>`` is the distro codename.
++
++The following information items can be found in a distro release file
++(shown with their keys and data types):
++
++* ``id`` (string): Distro ID, taken from the first part of the file name
++ before the hyphen (``-``) or underscore (``_``).
++
++ Note that the distro ID is not normalized or translated to lower case at this
++ point; this happens only for the result of the :func:`distro.id` function.
++
++* ``name`` (string): Distro name, as found in the first line of the file.
++
++* ``version_id`` (string): Distro version, as found in the first line of the
++ file. If not found, this information item will not exist.
++
++* ``codename`` (string): Distro codename, as found in the first line of the
++ file. If not found, this information item will not exist.
++
++ Note that the string in the codename field is not always really a
++ codename. For example, openSUSE returns "x86_64".
++
++**Examples:**
++
++1. The following distro release file ``/etc/centos-release``:
++
++ .. sourcecode:: text
++
++ CentOS Linux release 7.1.1503 (Core)
++
++ results in these information items:
++
++ =============================== ==========================================
++ Key Value
++ =============================== ==========================================
++ id "centos"
++ name "CentOS Linux"
++ version_id "7.1.1503"
++ codename "Core"
++ =============================== ==========================================
++
++2. The following distro release file ``/etc/oracle-release``:
++
++ .. sourcecode:: text
++
++ Oracle Linux Server release 7.1
++
++ results in these information items:
++
++ =============================== ==========================================
++ Key Value
++ =============================== ==========================================
++ id "oracle"
++ name "Oracle Linux Server"
++ version_id "7.1"
++ =============================== ==========================================
++
++3. The following distro release file ``/etc/SuSE-release``:
++
++ .. sourcecode:: text
++
++ openSUSE 42.1 (x86_64)
++
++ results in these information items:
++
++ =============================== ==========================================
++ Key Value
++ =============================== ==========================================
++ id "SuSE"
++ name "openSUSE"
++ version_id "42.1"
++ codename "x86_64"
++ =============================== ==========================================
++
+diff --git a/third_party/python/distro/query_local_distro.py b/third_party/python/distro/query_local_distro.py
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/query_local_distro.py
+@@ -0,0 +1,45 @@
++#!/usr/bin/env python
++# Copyright 2015,2016 Nir Cohen
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++
++from __future__ import print_function
++
++from pprint import pformat
++
++import distro
++
++
++def pprint(obj):
++ for line in pformat(obj).split('\n'):
++ print(4 * ' ' + line)
++
++
++print('os_release_info:')
++pprint(distro.os_release_info())
++print('lsb_release_info:')
++pprint(distro.lsb_release_info())
++print('distro_release_info:')
++pprint(distro.distro_release_info())
++print('id: {0}'.format(distro.id()))
++print('name: {0}'.format(distro.name()))
++print('name_pretty: {0}'.format(distro.name(True)))
++print('version: {0}'.format(distro.version()))
++print('version_pretty: {0}'.format(distro.version(True)))
++print('like: {0}'.format(distro.like()))
++print('codename: {0}'.format(distro.codename()))
++print('linux_distribution_full: {0}'.format(distro.linux_distribution()))
++print('linux_distribution: {0}'.format(distro.linux_distribution(False)))
++print('major_version: {0}'.format(distro.major_version()))
++print('minor_version: {0}'.format(distro.minor_version()))
++print('build_number: {0}'.format(distro.build_number()))
+diff --git a/third_party/python/distro/setup.cfg b/third_party/python/distro/setup.cfg
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/setup.cfg
+@@ -0,0 +1,10 @@
++[bdist_wheel]
++universal = 1
++
++[metadata]
++license_file = LICENSE
++
++[egg_info]
++tag_build =
++tag_date = 0
++
+diff --git a/third_party/python/distro/setup.py b/third_party/python/distro/setup.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/setup.py
+@@ -0,0 +1,67 @@
++# Copyright 2015,2016 Nir Cohen
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++
++import os
++import codecs
++from setuptools import setup
++
++# The following version is parsed by other parts of this package.
++# Don't change the format of the line, or the variable name.
++package_version = "1.4.0"
++
++here = os.path.abspath(os.path.dirname(__file__))
++
++
++def read(*parts):
++ # intentionally *not* adding an encoding option to open
++ return codecs.open(os.path.join(here, *parts), 'r').read()
++
++
++setup(
++ name='distro',
++ version=package_version,
++ url='https://github.com/nir0s/distro',
++ author='Nir Cohen',
++ author_email='nir36g@gmail.com',
++ license='Apache License, Version 2.0',
++ platforms='All',
++ description='Distro - an OS platform information API',
++ long_description=read('README.md'),
++ long_description_content_type='text/markdown',
++ py_modules=['distro'],
++ entry_points={
++ 'console_scripts': [
++ 'distro = distro:main',
++ ]
++ },
++ classifiers=[
++ 'Development Status :: 5 - Production/Stable',
++ 'Intended Audience :: Developers',
++ 'Intended Audience :: System Administrators',
++ 'License :: OSI Approved :: Apache Software License',
++ 'Operating System :: POSIX :: Linux',
++ 'Operating System :: POSIX :: BSD',
++ 'Operating System :: POSIX :: BSD :: FreeBSD',
++ 'Operating System :: POSIX :: BSD :: NetBSD',
++ 'Operating System :: POSIX :: BSD :: OpenBSD',
++ 'Programming Language :: Python :: 2',
++ 'Programming Language :: Python :: 2.7',
++ 'Programming Language :: Python :: 3',
++ 'Programming Language :: Python :: 3.4',
++ 'Programming Language :: Python :: 3.5',
++ 'Programming Language :: Python :: 3.6',
++ 'Topic :: Software Development :: Libraries :: Python Modules',
++ 'Topic :: System :: Operating System',
++ ]
++)
+diff --git a/third_party/python/distro/tests/__init__.py b/third_party/python/distro/tests/__init__.py
+new file mode 100644
+diff --git a/third_party/python/distro/tests/resources/distros/__shared__/bin/lsb_release b/third_party/python/distro/tests/resources/distros/__shared__/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/__shared__/bin/lsb_release
+@@ -0,0 +1,43 @@
++#!/bin/bash
++#
++# lsb_release command for testing the ld module.
++# Only the -a option is supported.
++#
++# This version of the lsb_release command reads an lsb-release file.
++#
++# The lsb-release file has the usual format, e.g.:
++# DISTRIB_ID=Ubuntu
++# DISTRIB_RELEASE=14.04
++# DISTRIB_CODENAME=trusty
++# DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS"
++# Where each line is optional. If a line is missing, the default value
++# will be the empty string.
++#
++
++if [[ "$@" != "-a" ]]; then
++ echo "Usage: lsb_release -a"
++ exit 2
++fi
++
++# Because the PATH is set to just this directory, we cannot use 'dirname'
++# or other external programs, but need to use built-in abilities of bash.
++LSB_FILE="${0%/*}/../etc/lsb-release"
++
++if [[ ! -f $LSB_FILE ]]; then
++ echo "Error: LSB release file does not exist: $LSB_FILE"
++ exit 1
++fi
++
++source $LSB_FILE
++
++if [[ -n $LSB_VERSION ]]; then
++ echo "LSB Version: $LSB_VERSION"
++else
++ echo "No LSB modules are available."
++fi
++echo "Distributor ID: ${DISTRIB_ID:-}"
++echo "Description: ${DISTRIB_DESCRIPTION:-}"
++echo "Release: ${DISTRIB_RELEASE:-}"
++echo "Codename: ${DISTRIB_CODENAME:-}"
++
++exit 0
+diff --git a/third_party/python/distro/tests/resources/distros/amazon2014/etc/system-release b/third_party/python/distro/tests/resources/distros/amazon2014/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/amazon2014/etc/system-release
+@@ -0,0 +1,1 @@
++Amazon Linux AMI release 2014.03
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/amazon2016/etc/os-release b/third_party/python/distro/tests/resources/distros/amazon2016/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/amazon2016/etc/os-release
+@@ -0,0 +1,9 @@
++NAME="Amazon Linux AMI"
++VERSION="2016.03"
++ID="amzn"
++ID_LIKE="rhel fedora"
++VERSION_ID="2016.03"
++PRETTY_NAME="Amazon Linux AMI 2016.03"
++ANSI_COLOR="0;33"
++CPE_NAME="cpe:/o:amazon:linux:2016.03:ga"
++HOME_URL="http://aws.amazon.com/amazon-linux-ami/"
+diff --git a/third_party/python/distro/tests/resources/distros/amazon2016/etc/system-release b/third_party/python/distro/tests/resources/distros/amazon2016/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/amazon2016/etc/system-release
+@@ -0,0 +1,1 @@
++Amazon Linux AMI release 2016.03
+diff --git a/third_party/python/distro/tests/resources/distros/arch/etc/arch-release b/third_party/python/distro/tests/resources/distros/arch/etc/arch-release
+new file mode 100644
+diff --git a/third_party/python/distro/tests/resources/distros/arch/etc/os-release b/third_party/python/distro/tests/resources/distros/arch/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/arch/etc/os-release
+@@ -0,0 +1,7 @@
++NAME="Arch Linux"
++ID=arch
++PRETTY_NAME="Arch Linux"
++ANSI_COLOR="0;36"
++HOME_URL="https://www.archlinux.org/"
++SUPPORT_URL="https://bbs.archlinux.org/"
++BUG_REPORT_URL="https://bugs.archlinux.org/"
+diff --git a/third_party/python/distro/tests/resources/distros/arch/usr/lib/os-release b/third_party/python/distro/tests/resources/distros/arch/usr/lib/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/arch/usr/lib/os-release
+@@ -0,0 +1,7 @@
++NAME="Arch Linux"
++ID=arch
++PRETTY_NAME="Arch Linux"
++ANSI_COLOR="0;36"
++HOME_URL="https://www.archlinux.org/"
++SUPPORT_URL="https://bbs.archlinux.org/"
++BUG_REPORT_URL="https://bugs.archlinux.org/"
+diff --git a/third_party/python/distro/tests/resources/distros/centos5/etc/centos-release b/third_party/python/distro/tests/resources/distros/centos5/etc/centos-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/centos5/etc/centos-release
+@@ -0,0 +1,1 @@
++CentOS release 5.11 (Final)
+diff --git a/third_party/python/distro/tests/resources/distros/centos5/etc/redhat-release b/third_party/python/distro/tests/resources/distros/centos5/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/centos5/etc/redhat-release
+@@ -0,0 +1,1 @@
++CentOS release 5.11 (Final)
+diff --git a/third_party/python/distro/tests/resources/distros/centos5/etc/system-release b/third_party/python/distro/tests/resources/distros/centos5/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/centos5/etc/system-release
+@@ -0,0 +1,1 @@
++CentOS release 5.11 (Final)
+diff --git a/third_party/python/distro/tests/resources/distros/centos7/etc/centos-release b/third_party/python/distro/tests/resources/distros/centos7/etc/centos-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/centos7/etc/centos-release
+@@ -0,0 +1,1 @@
++CentOS Linux release 7.1.1503 (Core)
+diff --git a/third_party/python/distro/tests/resources/distros/centos7/etc/os-release b/third_party/python/distro/tests/resources/distros/centos7/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/centos7/etc/os-release
+@@ -0,0 +1,16 @@
++NAME="CentOS Linux"
++VERSION="7 (Core)"
++ID="centos"
++ID_LIKE="rhel fedora"
++VERSION_ID="7"
++PRETTY_NAME="CentOS Linux 7 (Core)"
++ANSI_COLOR="0;31"
++CPE_NAME="cpe:/o:centos:centos:7"
++HOME_URL="https://www.centos.org/"
++BUG_REPORT_URL="https://bugs.centos.org/"
++
++CENTOS_MANTISBT_PROJECT="CentOS-7"
++CENTOS_MANTISBT_PROJECT_VERSION="7"
++REDHAT_SUPPORT_PRODUCT="centos"
++REDHAT_SUPPORT_PRODUCT_VERSION="7"
++
+diff --git a/third_party/python/distro/tests/resources/distros/centos7/etc/redhat-release b/third_party/python/distro/tests/resources/distros/centos7/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/centos7/etc/redhat-release
+@@ -0,0 +1,1 @@
++CentOS Linux release 7.1.1503 (Core)
+diff --git a/third_party/python/distro/tests/resources/distros/centos7/etc/system-release b/third_party/python/distro/tests/resources/distros/centos7/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/centos7/etc/system-release
+@@ -0,0 +1,1 @@
++CentOS Linux release 7.1.1503 (Core)
+diff --git a/third_party/python/distro/tests/resources/distros/cloudlinux5/etc/redhat-release b/third_party/python/distro/tests/resources/distros/cloudlinux5/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/cloudlinux5/etc/redhat-release
+@@ -0,0 +1,1 @@
++CloudLinux Server release 5.11 (Vladislav Volkov)
+diff --git a/third_party/python/distro/tests/resources/distros/cloudlinux6/etc/redhat-release b/third_party/python/distro/tests/resources/distros/cloudlinux6/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/cloudlinux6/etc/redhat-release
+@@ -0,0 +1,1 @@
++CloudLinux Server release 6.8 (Oleg Makarov)
+diff --git a/third_party/python/distro/tests/resources/distros/cloudlinux7/etc/os-release b/third_party/python/distro/tests/resources/distros/cloudlinux7/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/cloudlinux7/etc/os-release
+@@ -0,0 +1,10 @@
++NAME="CloudLinux"
++VERSION="7.3 (Yury Malyshev)"
++ID="cloudlinux"
++ID_LIKE="rhel fedora centos"
++VERSION_ID="7.3"
++PRETTY_NAME="CloudLinux 7.3 (Yury Malyshev)"
++ANSI_COLOR="0:31"
++CPE_NAME="cpe:/o:cloudlinux:cloudlinux:7.3:GA:server"
++HOME_URL="https://www.cloudlinux.com/"
++BUG_REPORT_URL="https://helpdesk.cloudlinux.com/"
+diff --git a/third_party/python/distro/tests/resources/distros/cloudlinux7/etc/redhat-release b/third_party/python/distro/tests/resources/distros/cloudlinux7/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/cloudlinux7/etc/redhat-release
+@@ -0,0 +1,1 @@
++CloudLinux release 7.3 (Yury Malyshev)
+diff --git a/third_party/python/distro/tests/resources/distros/coreos/etc/oem-release b/third_party/python/distro/tests/resources/distros/coreos/etc/oem-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/coreos/etc/oem-release
+@@ -0,0 +1,5 @@
++ID=digitalocean
++VERSION_ID=0.0.4
++NAME="DigitalOcean"
++HOME_URL="https://www.digitalocean.com/"
++BUG_REPORT_URL="https://github.com/coreos/bugs/issues"
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/coreos/etc/os-release b/third_party/python/distro/tests/resources/distros/coreos/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/coreos/etc/os-release
+@@ -0,0 +1,9 @@
++NAME=CoreOS
++ID=coreos
++VERSION=899.15.0
++VERSION_ID=899.15.0
++BUILD_ID=2016-04-05-1035
++PRETTY_NAME="CoreOS 899.15.0"
++ANSI_COLOR="1;32"
++HOME_URL="https://coreos.com/"
++BUG_REPORT_URL="https://github.com/coreos/bugs/issues"
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/debian8/bin/lsb_release b/third_party/python/distro/tests/resources/distros/debian8/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/debian8/bin/lsb_release
+@@ -0,0 +1,21 @@
++#!/bin/bash
++#
++# lsb_release command for testing the ld module.
++# Only the -a option is supported.
++#
++# This version of the lsb_release command works without a corresponding
++# etc/lsb-release file.
++#
++
++if [[ "$@" != "-a" ]]; then
++ echo "Usage: lsb_release -a"
++ exit 2
++fi
++
++echo "No LSB modules are available."
++echo "Distributor ID: Debian"
++echo "Description: Debian GNU/Linux 8.2 (jessie)"
++echo "Release: 8.2"
++echo "Codename: jessie"
++
++exit 0
+diff --git a/third_party/python/distro/tests/resources/distros/debian8/etc/debian_version b/third_party/python/distro/tests/resources/distros/debian8/etc/debian_version
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/debian8/etc/debian_version
+@@ -0,0 +1,1 @@
++8.2
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/debian8/etc/os-release b/third_party/python/distro/tests/resources/distros/debian8/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/debian8/etc/os-release
+@@ -0,0 +1,8 @@
++PRETTY_NAME="Debian GNU/Linux 8 (jessie)"
++NAME="Debian GNU/Linux"
++VERSION_ID="8"
++VERSION="8 (jessie)"
++ID=debian
++HOME_URL="http://www.debian.org/"
++SUPPORT_URL="http://www.debian.org/support/"
++BUG_REPORT_URL="https://bugs.debian.org/"
+diff --git a/third_party/python/distro/tests/resources/distros/exherbo/etc/os-release b/third_party/python/distro/tests/resources/distros/exherbo/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/exherbo/etc/os-release
+@@ -0,0 +1,7 @@
++NAME="Exherbo"
++PRETTY_NAME="Exherbo Linux"
++ID="exherbo"
++ANSI_COLOR="0;32"
++HOME_URL="https://www.exherbo.org/"
++SUPPORT_URL="irc://irc.freenode.net/#exherbo"
++BUG_REPORT_URL="https://bugs.exherbo.org/"
+diff --git a/third_party/python/distro/tests/resources/distros/fedora19/etc/fedora-release b/third_party/python/distro/tests/resources/distros/fedora19/etc/fedora-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora19/etc/fedora-release
+@@ -0,0 +1,1 @@
++Fedora release 19 (Schrödinger’s Cat)
+diff --git a/third_party/python/distro/tests/resources/distros/fedora19/etc/issue b/third_party/python/distro/tests/resources/distros/fedora19/etc/issue
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora19/etc/issue
+@@ -0,0 +1,3 @@
++Fedora release 19 (Schrödinger’s Cat)
++Kernel \r on an \m (\l)
++
+diff --git a/third_party/python/distro/tests/resources/distros/fedora19/etc/issue.net b/third_party/python/distro/tests/resources/distros/fedora19/etc/issue.net
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora19/etc/issue.net
+@@ -0,0 +1,2 @@
++Fedora release 19 (Schrödinger’s Cat)
++Kernel \r on an \m (\l)
+diff --git a/third_party/python/distro/tests/resources/distros/fedora19/etc/os-release b/third_party/python/distro/tests/resources/distros/fedora19/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora19/etc/os-release
+@@ -0,0 +1,7 @@
++NAME=Fedora
++VERSION="19 (Schrödinger’s Cat)"
++ID=fedora
++VERSION_ID=19
++PRETTY_NAME="Fedora 19 (Schrödinger’s Cat)"
++ANSI_COLOR="0;34"
++CPE_NAME="cpe:/o:fedoraproject:fedora:19"
+diff --git a/third_party/python/distro/tests/resources/distros/fedora19/etc/redhat-release b/third_party/python/distro/tests/resources/distros/fedora19/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora19/etc/redhat-release
+@@ -0,0 +1,1 @@
++Fedora release 19 (Schrödinger’s Cat)
+diff --git a/third_party/python/distro/tests/resources/distros/fedora19/etc/system-release b/third_party/python/distro/tests/resources/distros/fedora19/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora19/etc/system-release
+@@ -0,0 +1,1 @@
++Fedora release 19 (Schrödinger’s Cat)
+diff --git a/third_party/python/distro/tests/resources/distros/fedora19/etc/system-release-cpe b/third_party/python/distro/tests/resources/distros/fedora19/etc/system-release-cpe
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora19/etc/system-release-cpe
+@@ -0,0 +1,1 @@
++cpe:/o:fedoraproject:fedora:19
+diff --git a/third_party/python/distro/tests/resources/distros/fedora23/etc/fedora-release b/third_party/python/distro/tests/resources/distros/fedora23/etc/fedora-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora23/etc/fedora-release
+@@ -0,0 +1,1 @@
++Fedora release 23 (Twenty Three)
+diff --git a/third_party/python/distro/tests/resources/distros/fedora23/etc/os-release b/third_party/python/distro/tests/resources/distros/fedora23/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora23/etc/os-release
+@@ -0,0 +1,14 @@
++NAME=Fedora
++VERSION="23 (Twenty Three)"
++ID=fedora
++VERSION_ID=23
++PRETTY_NAME="Fedora 23 (Twenty Three)"
++ANSI_COLOR="0;34"
++CPE_NAME="cpe:/o:fedoraproject:fedora:23"
++HOME_URL="https://fedoraproject.org/"
++BUG_REPORT_URL="https://bugzilla.redhat.com/"
++REDHAT_BUGZILLA_PRODUCT="Fedora"
++REDHAT_BUGZILLA_PRODUCT_VERSION=23
++REDHAT_SUPPORT_PRODUCT="Fedora"
++REDHAT_SUPPORT_PRODUCT_VERSION=23
++PRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy
+diff --git a/third_party/python/distro/tests/resources/distros/fedora23/etc/redhat-release b/third_party/python/distro/tests/resources/distros/fedora23/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora23/etc/redhat-release
+@@ -0,0 +1,1 @@
++Fedora release 23 (Twenty Three)
+diff --git a/third_party/python/distro/tests/resources/distros/fedora23/etc/system-release b/third_party/python/distro/tests/resources/distros/fedora23/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora23/etc/system-release
+@@ -0,0 +1,1 @@
++Fedora release 23 (Twenty Three)
+diff --git a/third_party/python/distro/tests/resources/distros/fedora23/usr/lib/os-release b/third_party/python/distro/tests/resources/distros/fedora23/usr/lib/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora23/usr/lib/os-release
+@@ -0,0 +1,14 @@
++NAME=Fedora
++VERSION="23 (Twenty Three)"
++ID=fedora
++VERSION_ID=23
++PRETTY_NAME="Fedora 23 (Twenty Three)"
++ANSI_COLOR="0;34"
++CPE_NAME="cpe:/o:fedoraproject:fedora:23"
++HOME_URL="https://fedoraproject.org/"
++BUG_REPORT_URL="https://bugzilla.redhat.com/"
++REDHAT_BUGZILLA_PRODUCT="Fedora"
++REDHAT_BUGZILLA_PRODUCT_VERSION=23
++REDHAT_SUPPORT_PRODUCT="Fedora"
++REDHAT_SUPPORT_PRODUCT_VERSION=23
++PRIVACY_POLICY_URL=https://fedoraproject.org/wiki/Legal:PrivacyPolicy
+diff --git a/third_party/python/distro/tests/resources/distros/fedora30/etc/fedora-release b/third_party/python/distro/tests/resources/distros/fedora30/etc/fedora-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora30/etc/fedora-release
+@@ -0,0 +1,1 @@
++Fedora release 30 (Thirty)
+diff --git a/third_party/python/distro/tests/resources/distros/fedora30/etc/os-release b/third_party/python/distro/tests/resources/distros/fedora30/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora30/etc/os-release
+@@ -0,0 +1,19 @@
++NAME=Fedora
++VERSION="30 (Thirty)"
++ID=fedora
++VERSION_ID=30
++VERSION_CODENAME=""
++PLATFORM_ID="platform:f30"
++PRETTY_NAME="Fedora 30 (Thirty)"
++ANSI_COLOR="0;34"
++LOGO=fedora-logo-icon
++CPE_NAME="cpe:/o:fedoraproject:fedora:30"
++HOME_URL="https://fedoraproject.org/"
++DOCUMENTATION_URL="https://docs.fedoraproject.org/en-US/fedora/30/system-administrators-guide/"
++SUPPORT_URL="https://fedoraproject.org/wiki/Communicating_and_getting_help"
++BUG_REPORT_URL="https://bugzilla.redhat.com/"
++REDHAT_BUGZILLA_PRODUCT="Fedora"
++REDHAT_BUGZILLA_PRODUCT_VERSION=30
++REDHAT_SUPPORT_PRODUCT="Fedora"
++REDHAT_SUPPORT_PRODUCT_VERSION=30
++PRIVACY_POLICY_URL="https://fedoraproject.org/wiki/Legal:PrivacyPolicy"
+diff --git a/third_party/python/distro/tests/resources/distros/fedora30/etc/redhat-release b/third_party/python/distro/tests/resources/distros/fedora30/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora30/etc/redhat-release
+@@ -0,0 +1,1 @@
++Fedora release 30 (Thirty)
+diff --git a/third_party/python/distro/tests/resources/distros/fedora30/etc/system-release b/third_party/python/distro/tests/resources/distros/fedora30/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora30/etc/system-release
+@@ -0,0 +1,1 @@
++Fedora release 30 (Thirty)
+diff --git a/third_party/python/distro/tests/resources/distros/fedora30/usr/lib/os-release b/third_party/python/distro/tests/resources/distros/fedora30/usr/lib/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/fedora30/usr/lib/os-release
+@@ -0,0 +1,19 @@
++NAME=Fedora
++VERSION="30 (Thirty)"
++ID=fedora
++VERSION_ID=30
++VERSION_CODENAME=""
++PLATFORM_ID="platform:f30"
++PRETTY_NAME="Fedora 30 (Thirty)"
++ANSI_COLOR="0;34"
++LOGO=fedora-logo-icon
++CPE_NAME="cpe:/o:fedoraproject:fedora:30"
++HOME_URL="https://fedoraproject.org/"
++DOCUMENTATION_URL="https://docs.fedoraproject.org/en-US/fedora/30/system-administrators-guide/"
++SUPPORT_URL="https://fedoraproject.org/wiki/Communicating_and_getting_help"
++BUG_REPORT_URL="https://bugzilla.redhat.com/"
++REDHAT_BUGZILLA_PRODUCT="Fedora"
++REDHAT_BUGZILLA_PRODUCT_VERSION=30
++REDHAT_SUPPORT_PRODUCT="Fedora"
++REDHAT_SUPPORT_PRODUCT_VERSION=30
++PRIVACY_POLICY_URL="https://fedoraproject.org/wiki/Legal:PrivacyPolicy"
+diff --git a/third_party/python/distro/tests/resources/distros/freebsd111/bin/uname b/third_party/python/distro/tests/resources/distros/freebsd111/bin/uname
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/freebsd111/bin/uname
+@@ -0,0 +1,4 @@
++#!/bin/sh
++
++echo "FreeBSD 11.1-RELEASE"
++
+diff --git a/third_party/python/distro/tests/resources/distros/gentoo/etc/gentoo-release b/third_party/python/distro/tests/resources/distros/gentoo/etc/gentoo-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/gentoo/etc/gentoo-release
+@@ -0,0 +1,1 @@
++Gentoo Base System release 2.2
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/gentoo/etc/os-release b/third_party/python/distro/tests/resources/distros/gentoo/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/gentoo/etc/os-release
+@@ -0,0 +1,7 @@
++NAME=Gentoo
++ID=gentoo
++PRETTY_NAME="Gentoo/Linux"
++ANSI_COLOR="1;32"
++HOME_URL="http://www.gentoo.org/"
++SUPPORT_URL="http://www.gentoo.org/main/en/support.xml"
++BUG_REPORT_URL="https://bugs.gentoo.org/"
+diff --git a/third_party/python/distro/tests/resources/distros/kali/etc/os-release b/third_party/python/distro/tests/resources/distros/kali/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/kali/etc/os-release
+@@ -0,0 +1,10 @@
++PRETTY_NAME="Kali GNU/Linux Rolling"
++NAME="Kali GNU/Linux"
++ID=kali
++VERSION="2017.1"
++VERSION_ID="2017.1"
++ID_LIKE=debian
++ANSI_COLOR="1;31"
++HOME_URL="http://www.kali.org/"
++SUPPORT_URL="http://forums.kali.org/"
++BUG_REPORT_URL="http://bugs.kali.org/"
+diff --git a/third_party/python/distro/tests/resources/distros/kvmibm1/bin/lsb_release b/third_party/python/distro/tests/resources/distros/kvmibm1/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/kvmibm1/bin/lsb_release
+@@ -0,0 +1,21 @@
++#!/bin/bash
++#
++# lsb_release command for testing the ld module.
++# Only the -a option is supported.
++#
++# This version of the lsb_release command works without a corresponding
++# etc/lsb-release file.
++#
++
++if [[ "$@" != "-a" ]]; then
++ echo "Usage: lsb_release -a"
++ exit 2
++fi
++
++echo "LSB Version: :core-4.1-noarch:core-4.1-s390x"
++echo "Distributor ID: kvmibm"
++echo "Description: KVM for IBM z Systems release 1.1.1 (Z) "
++echo "Release: 1.1.1"
++echo "Codename: Z"
++
++exit 0
+diff --git a/third_party/python/distro/tests/resources/distros/kvmibm1/etc/base-release b/third_party/python/distro/tests/resources/distros/kvmibm1/etc/base-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/kvmibm1/etc/base-release
+@@ -0,0 +1,1 @@
++KVM for IBM z Systems release 1.1.1 (Z)
+diff --git a/third_party/python/distro/tests/resources/distros/kvmibm1/etc/os-release b/third_party/python/distro/tests/resources/distros/kvmibm1/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/kvmibm1/etc/os-release
+@@ -0,0 +1,9 @@
++NAME="KVM for IBM z Systems"
++VERSION="1.1.1 (Z)"
++ID="kvmibm"
++ID_LIKE="rhel fedora"
++VERSION_ID="1.1.1"
++PRETTY_NAME="KVM for IBM z Systems 1.1.1 (Z)"
++ANSI_COLOR="0;34"
++CPE_NAME="cpe:/o:ibm:kvmibm:1.1.1"
++BUILD_ID="20160316"
+diff --git a/third_party/python/distro/tests/resources/distros/kvmibm1/etc/redhat-release b/third_party/python/distro/tests/resources/distros/kvmibm1/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/kvmibm1/etc/redhat-release
+@@ -0,0 +1,1 @@
++KVM for IBM z Systems release 1.1.1 (Z)
+diff --git a/third_party/python/distro/tests/resources/distros/kvmibm1/etc/system-release b/third_party/python/distro/tests/resources/distros/kvmibm1/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/kvmibm1/etc/system-release
+@@ -0,0 +1,1 @@
++KVM for IBM z Systems release 1.1.1 (Z)
+diff --git a/third_party/python/distro/tests/resources/distros/linuxmint17/bin/lsb_release b/third_party/python/distro/tests/resources/distros/linuxmint17/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/linuxmint17/bin/lsb_release
+@@ -0,0 +1,43 @@
++#!/bin/bash
++#
++# lsb_release command for testing the ld module.
++# Only the -a option is supported.
++#
++# This version of the lsb_release command reads an lsb-release file.
++#
++# The lsb-release file has the usual format, e.g.:
++# DISTRIB_ID=Ubuntu
++# DISTRIB_RELEASE=14.04
++# DISTRIB_CODENAME=trusty
++# DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS"
++# Where each line is optional. If a line is missing, the default value
++# will be the empty string.
++#
++
++if [[ "$@" != "-a" ]]; then
++ echo "Usage: lsb_release -a"
++ exit 2
++fi
++
++# Because the PATH is set to just this directory, we cannot use 'dirname'
++# or other external programs, but need to use built-in abilities of bash.
++LSB_FILE="${0%/*}/../etc/lsb-release"
++
++if [[ ! -f $LSB_FILE ]]; then
++ echo "Error: LSB release file does not exist: $LSB_FILE"
++ exit 1
++fi
++
++source $LSB_FILE
++
++if [[ -n $LSB_VERSION ]]; then
++ echo "LSB Version: $LSB_VERSION"
++else
++ echo "No LSB modules are available."
++fi
++echo "Distributor ID: ${DISTRIB_ID:-}"
++echo "Description: ${DISTRIB_DESCRIPTION:-}"
++echo "Release: ${DISTRIB_RELEASE:-}"
++echo "Codename: ${DISTRIB_CODENAME:-}"
++
++exit 0
+diff --git a/third_party/python/distro/tests/resources/distros/linuxmint17/etc/debian_version b/third_party/python/distro/tests/resources/distros/linuxmint17/etc/debian_version
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/linuxmint17/etc/debian_version
+@@ -0,0 +1,1 @@
++jessie/sid
+diff --git a/third_party/python/distro/tests/resources/distros/linuxmint17/etc/lsb-release b/third_party/python/distro/tests/resources/distros/linuxmint17/etc/lsb-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/linuxmint17/etc/lsb-release
+@@ -0,0 +1,4 @@
++DISTRIB_ID=LinuxMint
++DISTRIB_RELEASE=17.3
++DISTRIB_CODENAME=rosa
++DISTRIB_DESCRIPTION="Linux Mint 17.3 Rosa"
+diff --git a/third_party/python/distro/tests/resources/distros/linuxmint17/etc/os-release b/third_party/python/distro/tests/resources/distros/linuxmint17/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/linuxmint17/etc/os-release
+@@ -0,0 +1,9 @@
++NAME="Ubuntu"
++VERSION="14.04.3 LTS, Trusty Tahr"
++ID=ubuntu
++ID_LIKE=debian
++PRETTY_NAME="Ubuntu 14.04.3 LTS"
++VERSION_ID="14.04"
++HOME_URL="http://www.ubuntu.com/"
++SUPPORT_URL="http://help.ubuntu.com/"
++BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
+diff --git a/third_party/python/distro/tests/resources/distros/linuxmint17/etc/upstream-release/lsb-release b/third_party/python/distro/tests/resources/distros/linuxmint17/etc/upstream-release/lsb-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/linuxmint17/etc/upstream-release/lsb-release
+@@ -0,0 +1,4 @@
++DISTRIB_ID=Ubuntu
++DISTRIB_RELEASE=14.04
++DISTRIB_CODENAME=trusty
++DISTRIB_DESCRIPTION="Ubuntu 14.04 LTS"
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/bin/lsb_release b/third_party/python/distro/tests/resources/distros/mageia5/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/bin/lsb_release
+@@ -0,0 +1,39 @@
++#!/bin/bash
++#
++# lsb_release command for testing the ld module.
++# Only the -a option is supported.
++#
++# This version of the lsb_release command reads an lsb-release file.
++#
++# The lsb-release file has the usual format, e.g.:
++# DISTRIB_ID=Ubuntu
++# DISTRIB_RELEASE=14.04
++# DISTRIB_CODENAME=trusty
++# DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS"
++# Where each line is optional. If a line is missing, the default value
++# will be the empty string.
++#
++
++if [[ "$@" != "-a" ]]; then
++ echo "Usage: lsb_release -a"
++ exit 2
++fi
++
++# Because the PATH is set to just this directory, we cannot use 'dirname'
++# or other external programs, but need to use built-in abilities of bash.
++LSB_FILE="${0%/*}/../etc/lsb-release"
++
++if [[ ! -f $LSB_FILE ]]; then
++ echo "Error: LSB release file does not exist: $LSB_FILE"
++ exit 1
++fi
++
++source $LSB_FILE
++
++echo "LSB Version: ${LSB_VERSION:-*}"
++echo "Distributor ID: ${DISTRIB_ID:-}"
++echo "Description: ${DISTRIB_DESCRIPTION:-}"
++echo "Release: ${DISTRIB_RELEASE:-}"
++echo "Codename: ${DISTRIB_CODENAME:-}"
++
++exit 0
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/etc/lsb-release b/third_party/python/distro/tests/resources/distros/mageia5/etc/lsb-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/etc/lsb-release
+@@ -0,0 +1,5 @@
++LSB_VERSION=
++DISTRIB_ID="Mageia"
++DISTRIB_RELEASE=5
++DISTRIB_CODENAME=thornicroft
++DISTRIB_DESCRIPTION="Mageia 5"
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/etc/mageia-release b/third_party/python/distro/tests/resources/distros/mageia5/etc/mageia-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/etc/mageia-release
+@@ -0,0 +1,1 @@
++Mageia release 5 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/etc/mandrake-release b/third_party/python/distro/tests/resources/distros/mageia5/etc/mandrake-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/etc/mandrake-release
+@@ -0,0 +1,1 @@
++Mageia release 5 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/etc/mandrakelinux-release b/third_party/python/distro/tests/resources/distros/mageia5/etc/mandrakelinux-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/etc/mandrakelinux-release
+@@ -0,0 +1,1 @@
++Mageia release 5 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/etc/mandriva-release b/third_party/python/distro/tests/resources/distros/mageia5/etc/mandriva-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/etc/mandriva-release
+@@ -0,0 +1,1 @@
++Mageia release 5 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/etc/os-release b/third_party/python/distro/tests/resources/distros/mageia5/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/etc/os-release
+@@ -0,0 +1,10 @@
++NAME="Mageia"
++VERSION="5"
++ID=mageia
++VERSION_ID=5
++ID_LIKE="mandriva fedora"
++PRETTY_NAME="Mageia 5"
++ANSI_COLOR="1;36"
++HOME_URL="http://www.mageia.org/"
++SUPPORT_URL="http://www.mageia.org/support/"
++BUG_REPORT_URL="https://bugs.mageia.org/"
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/etc/redhat-release b/third_party/python/distro/tests/resources/distros/mageia5/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/etc/redhat-release
+@@ -0,0 +1,1 @@
++Mageia release 5 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/etc/release b/third_party/python/distro/tests/resources/distros/mageia5/etc/release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/etc/release
+@@ -0,0 +1,1 @@
++Mageia release 5 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/etc/version b/third_party/python/distro/tests/resources/distros/mageia5/etc/version
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/etc/version
+@@ -0,0 +1,1 @@
++5 2 official
+diff --git a/third_party/python/distro/tests/resources/distros/mageia5/usr/lib/os-release b/third_party/python/distro/tests/resources/distros/mageia5/usr/lib/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mageia5/usr/lib/os-release
+@@ -0,0 +1,10 @@
++NAME="Mageia"
++VERSION="5"
++ID=mageia
++VERSION_ID=5
++ID_LIKE="mandriva fedora"
++PRETTY_NAME="Mageia 5"
++ANSI_COLOR="1;36"
++HOME_URL="http://www.mageia.org/"
++SUPPORT_URL="http://www.mageia.org/support/"
++BUG_REPORT_URL="https://bugs.mageia.org/"
+diff --git a/third_party/python/distro/tests/resources/distros/mandriva2011/bin/lsb_release b/third_party/python/distro/tests/resources/distros/mandriva2011/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mandriva2011/bin/lsb_release
+@@ -0,0 +1,39 @@
++#!/bin/bash
++#
++# lsb_release command for testing the ld module.
++# Only the -a option is supported.
++#
++# This version of the lsb_release command reads an lsb-release file.
++#
++# The lsb-release file has the usual format, e.g.:
++# DISTRIB_ID=Ubuntu
++# DISTRIB_RELEASE=14.04
++# DISTRIB_CODENAME=trusty
++# DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS"
++# Where each line is optional. If a line is missing, the default value
++# will be the empty string.
++#
++
++if [[ "$@" != "-a" ]]; then
++ echo "Usage: lsb_release -a"
++ exit 2
++fi
++
++# Because the PATH is set to just this directory, we cannot use 'dirname'
++# or other external programs, but need to use built-in abilities of bash.
++LSB_FILE="${0%/*}/../etc/lsb-release"
++
++if [[ ! -f $LSB_FILE ]]; then
++ echo "Error: LSB release file does not exist: $LSB_FILE"
++ exit 1
++fi
++
++source $LSB_FILE
++
++echo "LSB Version: ${LSB_VERSION:-*}"
++echo "Distributor ID: ${DISTRIB_ID:-}"
++echo "Description: ${DISTRIB_DESCRIPTION:-}"
++echo "Release: ${DISTRIB_RELEASE:-}"
++echo "Codename: ${DISTRIB_CODENAME:-}"
++
++exit 0
+diff --git a/third_party/python/distro/tests/resources/distros/mandriva2011/etc/lsb-release b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/lsb-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/lsb-release
+@@ -0,0 +1,5 @@
++LSB_VERSION=
++DISTRIB_ID=MandrivaLinux
++DISTRIB_RELEASE=2011.0
++DISTRIB_CODENAME=turtle
++DISTRIB_DESCRIPTION="Mandriva Linux 2011.0"
+diff --git a/third_party/python/distro/tests/resources/distros/mandriva2011/etc/mandrake-release b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/mandrake-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/mandrake-release
+@@ -0,0 +1,1 @@
++Mandriva Linux release 2011.0 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mandriva2011/etc/mandrakelinux-release b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/mandrakelinux-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/mandrakelinux-release
+@@ -0,0 +1,1 @@
++Mandriva Linux release 2011.0 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mandriva2011/etc/mandriva-release b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/mandriva-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/mandriva-release
+@@ -0,0 +1,1 @@
++Mandriva Linux release 2011.0 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mandriva2011/etc/redhat-release b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/redhat-release
+@@ -0,0 +1,1 @@
++Mandriva Linux release 2011.0 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mandriva2011/etc/release b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/release
+@@ -0,0 +1,1 @@
++Mandriva Linux release 2011.0 (Official) for x86_64
+diff --git a/third_party/python/distro/tests/resources/distros/mandriva2011/etc/version b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/version
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/mandriva2011/etc/version
+@@ -0,0 +1,1 @@
++2011.0.0 2 cooker
+diff --git a/third_party/python/distro/tests/resources/distros/manjaro1512/bin/lsb_release b/third_party/python/distro/tests/resources/distros/manjaro1512/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/manjaro1512/bin/lsb_release
+@@ -0,0 +1,43 @@
++#!/bin/bash
++#
++# lsb_release command for testing the ld module.
++# Only the -a option is supported.
++#
++# This version of the lsb_release command reads an lsb-release file.
++#
++# The lsb-release file has the usual format, e.g.:
++# DISTRIB_ID=Ubuntu
++# DISTRIB_RELEASE=14.04
++# DISTRIB_CODENAME=trusty
++# DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS"
++# Where each line is optional. If a line is missing, the default value
++# will be the empty string.
++#
++
++if [[ "$@" != "-a" ]]; then
++ echo "Usage: lsb_release -a"
++ exit 2
++fi
++
++# Because the PATH is set to just this directory, we cannot use 'dirname'
++# or other external programs, but need to use built-in abilities of bash.
++LSB_FILE="${0%/*}/../etc/lsb-release"
++
++if [[ ! -f $LSB_FILE ]]; then
++ echo "Error: LSB release file does not exist: $LSB_FILE"
++ exit 1
++fi
++
++source $LSB_FILE
++
++if [[ -n $LSB_VERSION ]]; then
++ echo "LSB Version: $LSB_VERSION"
++else
++ echo "No LSB modules are available."
++fi
++echo "Distributor ID: ${DISTRIB_ID:-}"
++echo "Description: ${DISTRIB_DESCRIPTION:-}"
++echo "Release: ${DISTRIB_RELEASE:-}"
++echo "Codename: ${DISTRIB_CODENAME:-}"
++
++exit 0
+diff --git a/third_party/python/distro/tests/resources/distros/manjaro1512/etc/lsb-release b/third_party/python/distro/tests/resources/distros/manjaro1512/etc/lsb-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/manjaro1512/etc/lsb-release
+@@ -0,0 +1,4 @@
++DISTRIB_ID=ManjaroLinux
++DISTRIB_RELEASE=15.12
++DISTRIB_CODENAME=Capella
++DISTRIB_DESCRIPTION="Manjaro Linux"
+diff --git a/third_party/python/distro/tests/resources/distros/manjaro1512/etc/manjaro-release b/third_party/python/distro/tests/resources/distros/manjaro1512/etc/manjaro-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/manjaro1512/etc/manjaro-release
+@@ -0,0 +1,1 @@
++Manjaro Linux
+diff --git a/third_party/python/distro/tests/resources/distros/manjaro1512/etc/os-release b/third_party/python/distro/tests/resources/distros/manjaro1512/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/manjaro1512/etc/os-release
+@@ -0,0 +1,7 @@
++NAME="Manjaro Linux"
++ID=manjaro
++PRETTY_NAME="Manjaro Linux"
++ANSI_COLOR="1;32"
++HOME_URL="http://www.manjaro.org/"
++SUPPORT_URL="http://www.manjaro.org/"
++BUG_REPORT_URL="http://bugs.manjaro.org/"
+diff --git a/third_party/python/distro/tests/resources/distros/netbsd711/bin/uname b/third_party/python/distro/tests/resources/distros/netbsd711/bin/uname
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/netbsd711/bin/uname
+@@ -0,0 +1,4 @@
++#!/bin/sh
++
++echo "NetBSD 7.1.1"
++
+diff --git a/third_party/python/distro/tests/resources/distros/openbsd62/bin/uname b/third_party/python/distro/tests/resources/distros/openbsd62/bin/uname
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/openbsd62/bin/uname
+@@ -0,0 +1,4 @@
++#!/bin/sh
++
++echo "OpenBSD 6.2"
++
+diff --git a/third_party/python/distro/tests/resources/distros/openelec6/etc/os-release b/third_party/python/distro/tests/resources/distros/openelec6/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/openelec6/etc/os-release
+@@ -0,0 +1,9 @@
++NAME="OpenELEC"
++VERSION="6.0.3"
++ID="openelec"
++VERSION_ID="6.0"
++PRETTY_NAME="OpenELEC (official) - Version: 6.0.3"
++HOME_URL="http://www.openelec.tv"
++BUG_REPORT_URL="https://github.com/OpenELEC/OpenELEC.tv"
++OPENELEC_ARCH="imx6.arm"
++OPENELEC_BUILD="official"
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/opensuse42/etc/SuSE-release b/third_party/python/distro/tests/resources/distros/opensuse42/etc/SuSE-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/opensuse42/etc/SuSE-release
+@@ -0,0 +1,1 @@
++openSUSE 42.1 (x86_64)
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/opensuse42/etc/os-release b/third_party/python/distro/tests/resources/distros/opensuse42/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/opensuse42/etc/os-release
+@@ -0,0 +1,10 @@
++NAME="openSUSE Leap"
++VERSION="42.1"
++VERSION_ID="42.1"
++PRETTY_NAME="openSUSE Leap 42.1 (x86_64)"
++ID=opensuse
++ANSI_COLOR="0;32"
++CPE_NAME="cpe:/o:opensuse:opensuse:42.1"
++BUG_REPORT_URL="https://bugs.opensuse.org"
++HOME_URL="https://opensuse.org/"
++ID_LIKE="suse"
+diff --git a/third_party/python/distro/tests/resources/distros/oracle7/etc/oracle-release b/third_party/python/distro/tests/resources/distros/oracle7/etc/oracle-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/oracle7/etc/oracle-release
+@@ -0,0 +1,1 @@
++Oracle Linux Server release 7.5
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/oracle7/etc/os-release b/third_party/python/distro/tests/resources/distros/oracle7/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/oracle7/etc/os-release
+@@ -0,0 +1,14 @@
++NAME="Oracle Linux Server"
++VERSION="7.5"
++ID="ol"
++VERSION_ID="7.5"
++PRETTY_NAME="Oracle Linux Server 7.5"
++ANSI_COLOR="0;31"
++CPE_NAME="cpe:/o:oracle:linux:7:5:server"
++HOME_URL="https://linux.oracle.com/"
++BUG_REPORT_URL="https://bugzilla.oracle.com/"
++
++ORACLE_BUGZILLA_PRODUCT="Oracle Linux 7"
++ORACLE_BUGZILLA_PRODUCT_VERSION=7.5
++ORACLE_SUPPORT_PRODUCT="Oracle Linux"
++ORACLE_SUPPORT_PRODUCT_VERSION=7.5
+diff --git a/third_party/python/distro/tests/resources/distros/raspbian7/etc/debian_version b/third_party/python/distro/tests/resources/distros/raspbian7/etc/debian_version
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/raspbian7/etc/debian_version
+@@ -0,0 +1,1 @@
++7.1
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/raspbian7/etc/os-release b/third_party/python/distro/tests/resources/distros/raspbian7/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/raspbian7/etc/os-release
+@@ -0,0 +1,10 @@
++PRETTY_NAME="Raspbian GNU/Linux 7 (wheezy)"
++NAME="Raspbian GNU/Linux"
++VERSION_ID="7"
++VERSION="7 (wheezy)"
++ID=raspbian
++ID_LIKE=debian
++ANSI_COLOR="1;31"
++HOME_URL="http://www.raspbian.org/"
++SUPPORT_URL="http://www.raspbian.org/RaspbianForums"
++BUG_REPORT_URL="http://www.raspbian.org/RaspbianBugs"
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/raspbian7/etc/os-release.orig b/third_party/python/distro/tests/resources/distros/raspbian7/etc/os-release.orig
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/raspbian7/etc/os-release.orig
+@@ -0,0 +1,9 @@
++PRETTY_NAME="Debian #OSNAME# 7 (wheezy)"
++NAME="Debian #OSNAME#"
++VERSION_ID="7"
++VERSION="7 (wheezy)"
++ID=debian
++ANSI_COLOR="1;31"
++HOME_URL="http://www.debian.org/"
++SUPPORT_URL="http://www.debian.org/support/"
++BUG_REPORT_URL="http://bugs.debian.org/"
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/raspbian8/etc/debian_version b/third_party/python/distro/tests/resources/distros/raspbian8/etc/debian_version
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/raspbian8/etc/debian_version
+@@ -0,0 +1,1 @@
++8.0
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/raspbian8/etc/os-release b/third_party/python/distro/tests/resources/distros/raspbian8/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/raspbian8/etc/os-release
+@@ -0,0 +1,9 @@
++PRETTY_NAME="Raspbian GNU/Linux 8 (jessie)"
++NAME="Raspbian GNU/Linux"
++VERSION_ID="8"
++VERSION="8 (jessie)"
++ID=raspbian
++ID_LIKE=debian
++HOME_URL="http://www.raspbian.org/"
++SUPPORT_URL="http://www.raspbian.org/RaspbianForums"
++BUG_REPORT_URL="http://www.raspbian.org/RaspbianBugs"
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/rhel5/etc/redhat-release b/third_party/python/distro/tests/resources/distros/rhel5/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/rhel5/etc/redhat-release
+@@ -0,0 +1,1 @@
++Red Hat Enterprise Linux Server release 5.11 (Tikanga)
+diff --git a/third_party/python/distro/tests/resources/distros/rhel6/etc/redhat-release b/third_party/python/distro/tests/resources/distros/rhel6/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/rhel6/etc/redhat-release
+@@ -0,0 +1,1 @@
++Red Hat Enterprise Linux Server release 6.5 (Santiago)
+diff --git a/third_party/python/distro/tests/resources/distros/rhel6/etc/system-release b/third_party/python/distro/tests/resources/distros/rhel6/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/rhel6/etc/system-release
+@@ -0,0 +1,1 @@
++Red Hat Enterprise Linux Server release 6.5 (Santiago)
+diff --git a/third_party/python/distro/tests/resources/distros/rhel7/etc/os-release b/third_party/python/distro/tests/resources/distros/rhel7/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/rhel7/etc/os-release
+@@ -0,0 +1,15 @@
++NAME="Red Hat Enterprise Linux Server"
++VERSION="7.0 (Maipo)"
++ID="rhel"
++ID_LIKE="fedora"
++VERSION_ID="7.0"
++PRETTY_NAME="Red Hat Enterprise Linux Server 7.0 (Maipo)"
++ANSI_COLOR="0;31"
++CPE_NAME="cpe:/o:redhat:enterprise_linux:7.0:GA:server"
++HOME_URL="https://www.redhat.com/"
++BUG_REPORT_URL="https://bugzilla.redhat.com/"
++
++REDHAT_BUGZILLA_PRODUCT="Red Hat Enterprise Linux 7"
++REDHAT_BUGZILLA_PRODUCT_VERSION=7.0
++REDHAT_SUPPORT_PRODUCT="Red Hat Enterprise Linux"
++REDHAT_SUPPORT_PRODUCT_VERSION=7.0
+diff --git a/third_party/python/distro/tests/resources/distros/rhel7/etc/redhat-release b/third_party/python/distro/tests/resources/distros/rhel7/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/rhel7/etc/redhat-release
+@@ -0,0 +1,1 @@
++Red Hat Enterprise Linux Server release 7.0 (Maipo)
+diff --git a/third_party/python/distro/tests/resources/distros/rhel7/etc/system-release b/third_party/python/distro/tests/resources/distros/rhel7/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/rhel7/etc/system-release
+@@ -0,0 +1,1 @@
++Red Hat Enterprise Linux Server release 7.0 (Maipo)
+diff --git a/third_party/python/distro/tests/resources/distros/scientific6/etc/redhat-release b/third_party/python/distro/tests/resources/distros/scientific6/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/scientific6/etc/redhat-release
+@@ -0,0 +1,1 @@
++Scientific Linux release 6.4 (Carbon)
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/scientific6/etc/system-release b/third_party/python/distro/tests/resources/distros/scientific6/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/scientific6/etc/system-release
+@@ -0,0 +1,1 @@
++Scientific Linux release 6.4 (Carbon)
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/scientific7/etc/os-release b/third_party/python/distro/tests/resources/distros/scientific7/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/scientific7/etc/os-release
+@@ -0,0 +1,15 @@
++NAME="Scientific Linux"
++VERSION="7.2 (Nitrogen)"
++ID="rhel"
++ID_LIKE="fedora"
++VERSION_ID="7.2"
++PRETTY_NAME="Scientific Linux 7.2 (Nitrogen)"
++ANSI_COLOR="0;31"
++CPE_NAME="cpe:/o:scientificlinux:scientificlinux:7.2:GA"
++HOME_URL="http://www.scientificlinux.org//"
++BUG_REPORT_URL="mailto:scientific-linux-devel@listserv.fnal.gov"
++
++REDHAT_BUGZILLA_PRODUCT="Scientific Linux 7"
++REDHAT_BUGZILLA_PRODUCT_VERSION=7.2
++REDHAT_SUPPORT_PRODUCT="Scientific Linux"
++REDHAT_SUPPORT_PRODUCT_VERSION="7.2"
+diff --git a/third_party/python/distro/tests/resources/distros/scientific7/etc/redhat-release b/third_party/python/distro/tests/resources/distros/scientific7/etc/redhat-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/scientific7/etc/redhat-release
+@@ -0,0 +1,1 @@
++Scientific Linux release 7.2 (Nitrogen)
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/scientific7/etc/sl-release b/third_party/python/distro/tests/resources/distros/scientific7/etc/sl-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/scientific7/etc/sl-release
+@@ -0,0 +1,1 @@
++Scientific Linux release 7.2 (Nitrogen)
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/scientific7/etc/system-release b/third_party/python/distro/tests/resources/distros/scientific7/etc/system-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/scientific7/etc/system-release
+@@ -0,0 +1,1 @@
++Scientific Linux release 7.2 (Nitrogen)
+\ No newline at end of file
+diff --git a/third_party/python/distro/tests/resources/distros/slackware14/etc/os-release b/third_party/python/distro/tests/resources/distros/slackware14/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/slackware14/etc/os-release
+@@ -0,0 +1,10 @@
++NAME=Slackware
++VERSION="14.1"
++ID=slackware
++VERSION_ID=14.1
++PRETTY_NAME="Slackware 14.1"
++ANSI_COLOR="0;34"
++CPE_NAME="cpe:/o:slackware:slackware_linux:14.1"
++HOME_URL="http://slackware.com/"
++SUPPORT_URL="http://www.linuxquestions.org/questions/slackware-14/"
++BUG_REPORT_URL="http://www.linuxquestions.org/questions/slackware-14/"
+diff --git a/third_party/python/distro/tests/resources/distros/slackware14/etc/slackware-version b/third_party/python/distro/tests/resources/distros/slackware14/etc/slackware-version
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/slackware14/etc/slackware-version
+@@ -0,0 +1,1 @@
++Slackware 14.1
+diff --git a/third_party/python/distro/tests/resources/distros/sles12/bin/lsb_release b/third_party/python/distro/tests/resources/distros/sles12/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/sles12/bin/lsb_release
+@@ -0,0 +1,21 @@
++#!/bin/bash
++#
++# lsb_release command for testing the ld module.
++# Only the -a option is supported.
++#
++# This version of the lsb_release command works without a corresponding
++# etc/lsb-release file.
++#
++
++if [[ "$@" != "-a" ]]; then
++ echo "Usage: lsb_release -a"
++ exit 2
++fi
++
++echo "LSB Version: n/a"
++echo "Distributor ID: SUSE LINUX"
++echo "Description: SUSE Linux Enterprise Server 12 SP1"
++echo "Release: 12.1"
++echo "Codename: n/a"
++
++exit 0
+diff --git a/third_party/python/distro/tests/resources/distros/sles12/etc/SuSE-release b/third_party/python/distro/tests/resources/distros/sles12/etc/SuSE-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/sles12/etc/SuSE-release
+@@ -0,0 +1,5 @@
++SUSE Linux Enterprise Server 12 (s390x)
++VERSION = 12
++PATCHLEVEL = 1
++# This file is deprecated and will be removed in a future service pack or release.
++# Please check /etc/os-release for details about this release.
+diff --git a/third_party/python/distro/tests/resources/distros/sles12/etc/os-release b/third_party/python/distro/tests/resources/distros/sles12/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/sles12/etc/os-release
+@@ -0,0 +1,7 @@
++NAME="SLES"
++VERSION="12-SP1"
++VERSION_ID="12.1"
++PRETTY_NAME="SUSE Linux Enterprise Server 12 SP1"
++ID="sles"
++ANSI_COLOR="0;32"
++CPE_NAME="cpe:/o:suse:sles:12:sp1"
+diff --git a/third_party/python/distro/tests/resources/distros/ubuntu14/bin/lsb_release b/third_party/python/distro/tests/resources/distros/ubuntu14/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/ubuntu14/bin/lsb_release
+@@ -0,0 +1,39 @@
++#!/bin/bash
++#
++# lsb_release command for testing the ld module.
++# Only the -a option is supported.
++#
++# This version of the lsb_release command reads an lsb-release file.
++#
++# The lsb-release file has the usual format, e.g.:
++# DISTRIB_ID=Ubuntu
++# DISTRIB_RELEASE=14.04
++# DISTRIB_CODENAME=trusty
++# DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS"
++# Where each line is optional. If a line is missing, the default value
++# will be the empty string.
++#
++
++if [[ "$@" != "-a" ]]; then
++ echo "Usage: lsb_release -a"
++ exit 2
++fi
++
++# Because the PATH is set to just this directory, we cannot use 'dirname'
++# or other external programs, but need to use built-in abilities of bash.
++LSB_FILE="${0%/*}/../etc/lsb-release"
++
++if [[ ! -f $LSB_FILE ]]; then
++ echo "Error: LSB release file does not exist: $LSB_FILE"
++ exit 1
++fi
++
++source $LSB_FILE
++
++echo "No LSB modules are available."
++echo "Distributor ID: ${DISTRIB_ID:-}"
++echo "Description: ${DISTRIB_DESCRIPTION:-}"
++echo "Release: ${DISTRIB_RELEASE:-}"
++echo "Codename: ${DISTRIB_CODENAME:-}"
++
++exit 0
+diff --git a/third_party/python/distro/tests/resources/distros/ubuntu14/etc/debian_version b/third_party/python/distro/tests/resources/distros/ubuntu14/etc/debian_version
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/ubuntu14/etc/debian_version
+@@ -0,0 +1,1 @@
++jessie/sid
+diff --git a/third_party/python/distro/tests/resources/distros/ubuntu14/etc/lsb-release b/third_party/python/distro/tests/resources/distros/ubuntu14/etc/lsb-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/ubuntu14/etc/lsb-release
+@@ -0,0 +1,4 @@
++DISTRIB_ID=Ubuntu
++DISTRIB_RELEASE=14.04
++DISTRIB_CODENAME=trusty
++DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS"
+diff --git a/third_party/python/distro/tests/resources/distros/ubuntu14/etc/os-release b/third_party/python/distro/tests/resources/distros/ubuntu14/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/ubuntu14/etc/os-release
+@@ -0,0 +1,9 @@
++NAME="Ubuntu"
++VERSION="14.04.3 LTS, Trusty Tahr"
++ID=ubuntu
++ID_LIKE=debian
++PRETTY_NAME="Ubuntu 14.04.3 LTS"
++VERSION_ID="14.04"
++HOME_URL="http://www.ubuntu.com/"
++SUPPORT_URL="http://help.ubuntu.com/"
++BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
+diff --git a/third_party/python/distro/tests/resources/distros/ubuntu16/bin/lsb_release b/third_party/python/distro/tests/resources/distros/ubuntu16/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/ubuntu16/bin/lsb_release
+@@ -0,0 +1,39 @@
++#!/bin/bash
++#
++# lsb_release command for testing the ld module.
++# Only the -a option is supported.
++#
++# This version of the lsb_release command reads an lsb-release file.
++#
++# The lsb-release file has the usual format, e.g.:
++# DISTRIB_ID=Ubuntu
++# DISTRIB_RELEASE=14.04
++# DISTRIB_CODENAME=trusty
++# DISTRIB_DESCRIPTION="Ubuntu 14.04.3 LTS"
++# Where each line is optional. If a line is missing, the default value
++# will be the empty string.
++#
++
++if [[ "$@" != "-a" ]]; then
++ echo "Usage: lsb_release -a"
++ exit 2
++fi
++
++# Because the PATH is set to just this directory, we cannot use 'dirname'
++# or other external programs, but need to use built-in abilities of bash.
++LSB_FILE="${0%/*}/../etc/lsb-release"
++
++if [[ ! -f $LSB_FILE ]]; then
++ echo "Error: LSB release file does not exist: $LSB_FILE"
++ exit 1
++fi
++
++source $LSB_FILE
++
++echo "No LSB modules are available."
++echo "Distributor ID: ${DISTRIB_ID:-}"
++echo "Description: ${DISTRIB_DESCRIPTION:-}"
++echo "Release: ${DISTRIB_RELEASE:-}"
++echo "Codename: ${DISTRIB_CODENAME:-}"
++
++exit 0
+diff --git a/third_party/python/distro/tests/resources/distros/ubuntu16/etc/debian_version b/third_party/python/distro/tests/resources/distros/ubuntu16/etc/debian_version
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/ubuntu16/etc/debian_version
+@@ -0,0 +1,1 @@
++stretch/sid
+diff --git a/third_party/python/distro/tests/resources/distros/ubuntu16/etc/lsb-release b/third_party/python/distro/tests/resources/distros/ubuntu16/etc/lsb-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/ubuntu16/etc/lsb-release
+@@ -0,0 +1,4 @@
++DISTRIB_ID=Ubuntu
++DISTRIB_RELEASE=16.04
++DISTRIB_CODENAME=xenial
++DISTRIB_DESCRIPTION="Ubuntu 16.04.1 LTS"
+diff --git a/third_party/python/distro/tests/resources/distros/ubuntu16/etc/os-release b/third_party/python/distro/tests/resources/distros/ubuntu16/etc/os-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/distros/ubuntu16/etc/os-release
+@@ -0,0 +1,10 @@
++NAME="Ubuntu"
++VERSION="16.04.1 LTS (Xenial Xerus)"
++ID=ubuntu
++ID_LIKE=debian
++PRETTY_NAME="Ubuntu 16.04.1 LTS"
++VERSION_ID="16.04"
++HOME_URL="http://www.ubuntu.com/"
++SUPPORT_URL="http://help.ubuntu.com/"
++BUG_REPORT_URL="http://bugs.launchpad.net/ubuntu/"
++UBUNTU_CODENAME=xenial
+diff --git a/third_party/python/distro/tests/resources/special/empty-release b/third_party/python/distro/tests/resources/special/empty-release
+new file mode 100644
+diff --git a/third_party/python/distro/tests/resources/testdistros/distro/baduname/bin/uname b/third_party/python/distro/tests/resources/testdistros/distro/baduname/bin/uname
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/testdistros/distro/baduname/bin/uname
+@@ -0,0 +1,2 @@
++#!/bin/sh
++echo "I'm a bad uname file!"
+diff --git a/third_party/python/distro/tests/resources/testdistros/distro/unknowndistro/etc/unknowndistro-release b/third_party/python/distro/tests/resources/testdistros/distro/unknowndistro/etc/unknowndistro-release
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/testdistros/distro/unknowndistro/etc/unknowndistro-release
+@@ -0,0 +1,1 @@
++Unknown Distro release 1.0 (Unknown Codename)
+diff --git a/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc001/bin/lsb_release b/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc001/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc001/bin/lsb_release
+@@ -0,0 +1,5 @@
++#!/bin/bash
++rc=1
++msg="General error"
++echo "Test failure - exiting with $rc ($msg)"
++exit $rc
+diff --git a/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc002/bin/lsb_release b/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc002/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc002/bin/lsb_release
+@@ -0,0 +1,5 @@
++#!/bin/bash
++rc=2
++msg="Misuse of shell builtins, or missing keyword or command"
++echo "Test failure - exiting with $rc ($msg)"
++exit $rc
+diff --git a/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc126/bin/lsb_release b/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc126/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc126/bin/lsb_release
+@@ -0,0 +1,5 @@
++#!/bin/bash
++rc=126
++msg="Cannot execute command"
++echo "Test failure - exiting with $rc ($msg)"
++exit $rc
+diff --git a/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc130/bin/lsb_release b/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc130/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc130/bin/lsb_release
+@@ -0,0 +1,5 @@
++#!/bin/bash
++rc=130
++msg="Signal 2 - Script terminated with Ctrl-C"
++echo "Test failure - exiting with $rc ($msg)"
++exit $rc
+diff --git a/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc255/bin/lsb_release b/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc255/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/testdistros/lsb/lsb_rc255/bin/lsb_release
+@@ -0,0 +1,5 @@
++#!/bin/bash
++rc=255
++msg="Exit code out of range"
++echo "Test failure - exiting with $rc ($msg)"
++exit $rc
+diff --git a/third_party/python/distro/tests/resources/testdistros/lsb/ubuntu14_nomodules/bin/lsb_release b/third_party/python/distro/tests/resources/testdistros/lsb/ubuntu14_nomodules/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/testdistros/lsb/ubuntu14_nomodules/bin/lsb_release
+@@ -0,0 +1,8 @@
++#!/bin/bash
++/bin/cat <<'EOT'
++No LSB modules are available.
++Distributor ID: Ubuntu
++Description: Ubuntu 14.04.3 LTS
++Release: 14.04
++Codename: trusty
++EOT
+diff --git a/third_party/python/distro/tests/resources/testdistros/lsb/ubuntu14_normal/bin/lsb_release b/third_party/python/distro/tests/resources/testdistros/lsb/ubuntu14_normal/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/testdistros/lsb/ubuntu14_normal/bin/lsb_release
+@@ -0,0 +1,7 @@
++#!/bin/bash
++/bin/cat <<'EOT'
++Distributor ID: Ubuntu
++Description: Ubuntu 14.04.3 LTS
++Release: 14.04
++Codename: trusty
++EOT
+diff --git a/third_party/python/distro/tests/resources/testdistros/lsb/ubuntu14_trailingblanks/bin/lsb_release b/third_party/python/distro/tests/resources/testdistros/lsb/ubuntu14_trailingblanks/bin/lsb_release
+new file mode 100755
+--- /dev/null
++++ b/third_party/python/distro/tests/resources/testdistros/lsb/ubuntu14_trailingblanks/bin/lsb_release
+@@ -0,0 +1,8 @@
++#!/bin/bash
++/bin/cat <<'EOT'
++No LSB modules are available.
++Distributor ID: Ubuntu
++Description: Ubuntu 14.04.3 LTS
++Release: 14.04
++Codename: trusty
++EOT
+diff --git a/third_party/python/distro/tests/test_distro.py b/third_party/python/distro/tests/test_distro.py
+new file mode 100644
+--- /dev/null
++++ b/third_party/python/distro/tests/test_distro.py
+@@ -0,0 +1,2062 @@
++# Copyright 2015,2016 Nir Cohen
++#
++# Licensed under the Apache License, Version 2.0 (the "License");
++# you may not use this file except in compliance with the License.
++# You may obtain a copy of the License at
++#
++# http://www.apache.org/licenses/LICENSE-2.0
++#
++# Unless required by applicable law or agreed to in writing, software
++# distributed under the License is distributed on an "AS IS" BASIS,
++# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++# See the License for the specific language governing permissions and
++# limitations under the License.
++
++import os
++import sys
++import ast
++import subprocess
++try:
++ from StringIO import StringIO # Python 2.x
++except ImportError:
++ from io import StringIO # Python 3.x
++
++import pytest
++
++
++BASE = os.path.abspath(os.path.dirname(__file__))
++RESOURCES = os.path.join(BASE, 'resources')
++DISTROS_DIR = os.path.join(RESOURCES, 'distros')
++TESTDISTROS = os.path.join(RESOURCES, 'testdistros')
++SPECIAL = os.path.join(RESOURCES, 'special')
++DISTROS = [dist for dist in os.listdir(DISTROS_DIR) if dist != '__shared__']
++
++
++IS_LINUX = sys.platform.startswith('linux')
++if IS_LINUX:
++ import distro
++
++ RELATIVE_UNIXCONFDIR = distro._UNIXCONFDIR[1:]
++ MODULE_DISTRO = distro._distro
++
++
++class TestNonLinuxPlatform:
++ """Obviously, this only tests Windows. Will add OS X tests on Travis
++ Later
++ """
++
++ def test_cant_use_on_windows(self):
++ try:
++ import distro # NOQA
++ except ImportError as ex:
++ assert 'Unsupported platform' in str(ex)
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestCli:
++
++ def _parse(self, command):
++ sys.argv = command.split()
++ distro.main()
++
++ def _run(self, command):
++ stdout, _ = subprocess.Popen(
++ command,
++ stdout=subprocess.PIPE,
++ stderr=subprocess.PIPE).communicate()
++ # Need to decode or we get bytes in Python 3.x
++ return stdout.decode('utf-8')
++
++ def test_cli_for_coverage_yuch(self):
++ self._parse('distro')
++ self._parse('distro -j')
++
++ def test_cli(self):
++ command = [sys.executable, '-m', 'distro']
++ desired_output = 'Name: ' + distro.name(pretty=True)
++ distro_version = distro.version(pretty=True)
++ distro_codename = distro.codename()
++ desired_output += '\n' + 'Version: ' + distro_version
++ desired_output += '\n' + 'Codename: ' + distro_codename
++ desired_output += '\n'
++ assert self._run(command) == desired_output
++
++ def test_cli_json(self):
++ command = [sys.executable, '-m', 'distro', '-j']
++ assert ast.literal_eval(self._run(command)) == distro.info()
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class DistroTestCase(object):
++ """A base class for any testcase classes that test the distributions
++ represented in the `DISTROS` subtree.
++ """
++
++ def setup_method(self, test_method):
++ # The environment stays the same across all testcases, so we
++ # save and restore the PATH env var in each test case that
++ # changes it:
++ self._saved_path = os.environ["PATH"]
++ self._saved_UNIXCONFDIR = distro._UNIXCONFDIR
++
++ def teardown_method(self, test_method):
++ os.environ["PATH"] = self._saved_path
++ distro._UNIXCONFDIR = self._saved_UNIXCONFDIR
++
++ def _setup_for_distro(self, distro_root):
++ distro_bin = os.path.join(distro_root, 'bin')
++ # We don't want to pick up a possibly present lsb_release in the
++ # distro that runs this test, so we use a PATH with only one entry:
++ os.environ["PATH"] = distro_bin
++ distro._UNIXCONFDIR = os.path.join(distro_root, RELATIVE_UNIXCONFDIR)
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestOSRelease:
++
++ def setup_method(self, test_method):
++ dist = test_method.__name__.split('_')[1]
++ os_release = os.path.join(DISTROS_DIR, dist, 'etc', 'os-release')
++ self.distro = distro.LinuxDistribution(False, os_release, 'non')
++
++ def _test_outcome(self, outcome):
++ assert self.distro.id() == outcome.get('id', '')
++ assert self.distro.name() == outcome.get('name', '')
++ assert self.distro.name(pretty=True) == outcome.get('pretty_name', '')
++ assert self.distro.version() == outcome.get('version', '')
++ assert self.distro.version(pretty=True) == \
++ outcome.get('pretty_version', '')
++ assert self.distro.version(best=True) == \
++ outcome.get('best_version', '')
++ assert self.distro.like() == outcome.get('like', '')
++ assert self.distro.codename() == outcome.get('codename', '')
++
++ def test_arch_os_release(self):
++ desired_outcome = {
++ 'id': 'arch',
++ 'name': 'Arch Linux',
++ 'pretty_name': 'Arch Linux',
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_kali_os_release(self):
++ desired_outcome = {
++ 'id': 'kali',
++ 'name': 'Kali GNU/Linux',
++ 'pretty_name': 'Kali GNU/Linux Rolling',
++ 'version': '2017.1',
++ 'pretty_version': '2017.1',
++ 'best_version': '2017.1',
++ 'like': 'debian'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_centos7_os_release(self):
++ desired_outcome = {
++ 'id': 'centos',
++ 'name': 'CentOS Linux',
++ 'pretty_name': 'CentOS Linux 7 (Core)',
++ 'version': '7',
++ 'pretty_version': '7 (Core)',
++ 'best_version': '7',
++ 'like': 'rhel fedora',
++ 'codename': 'Core'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_coreos_os_release(self):
++ desired_outcome = {
++ 'id': 'coreos',
++ 'name': 'CoreOS',
++ 'pretty_name': 'CoreOS 899.15.0',
++ 'version': '899.15.0',
++ 'pretty_version': '899.15.0',
++ 'best_version': '899.15.0'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_debian8_os_release(self):
++ desired_outcome = {
++ 'id': 'debian',
++ 'name': 'Debian GNU/Linux',
++ 'pretty_name': 'Debian GNU/Linux 8 (jessie)',
++ 'version': '8',
++ 'pretty_version': '8 (jessie)',
++ 'best_version': '8',
++ 'codename': 'jessie'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_fedora19_os_release(self):
++ desired_outcome = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'pretty_name': u'Fedora 19 (Schr\u00F6dinger\u2019s Cat)',
++ 'version': '19',
++ 'pretty_version': u'19 (Schr\u00F6dinger\u2019s Cat)',
++ 'best_version': '19',
++ 'codename': u'Schr\u00F6dinger\u2019s Cat'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_fedora23_os_release(self):
++ desired_outcome = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'pretty_name': 'Fedora 23 (Twenty Three)',
++ 'version': '23',
++ 'pretty_version': '23 (Twenty Three)',
++ 'best_version': '23',
++ 'codename': 'Twenty Three'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_fedora30_os_release(self):
++ # Fedora 21 and above no longer have code names but the metadata in os-release was only
++ # changed in a detectable way in Fedora 30+. The piece in parenthesis in the pretty_name
++ # field contains the VARIANT and differs depending on the variant which was installed.
++ desired_outcome = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'pretty_name': 'Fedora 30 (Thirty)',
++ 'version': '30',
++ 'pretty_version': '30',
++ 'best_version': '30',
++ 'codename': ''
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_kvmibm1_os_release(self):
++ desired_outcome = {
++ 'id': 'kvmibm',
++ 'name': 'KVM for IBM z Systems',
++ 'pretty_name': 'KVM for IBM z Systems 1.1.1 (Z)',
++ 'version': '1.1.1',
++ 'pretty_version': '1.1.1 (Z)',
++ 'best_version': '1.1.1',
++ 'like': 'rhel fedora',
++ 'codename': 'Z'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_linuxmint17_os_release(self):
++ # Note: LinuxMint 17 actually *does* have Ubuntu 14.04 data in its
++ # os-release file. See discussion in GitHub issue #78.
++ desired_outcome = {
++ 'id': 'ubuntu',
++ 'name': 'Ubuntu',
++ 'pretty_name': 'Ubuntu 14.04.3 LTS',
++ 'version': '14.04',
++ 'pretty_version': '14.04 (Trusty Tahr)',
++ 'best_version': '14.04.3',
++ 'like': 'debian',
++ 'codename': 'Trusty Tahr'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_mageia5_os_release(self):
++ desired_outcome = {
++ 'id': 'mageia',
++ 'name': 'Mageia',
++ 'pretty_name': 'Mageia 5',
++ 'version': '5',
++ 'pretty_version': '5',
++ 'best_version': '5',
++ 'like': 'mandriva fedora',
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_manjaro1512_os_release(self):
++ self._test_outcome({
++ 'id': 'manjaro',
++ 'name': 'Manjaro Linux',
++ 'pretty_name': 'Manjaro Linux',
++ })
++
++ def test_opensuse42_os_release(self):
++ desired_outcome = {
++ 'id': 'opensuse',
++ 'name': 'openSUSE Leap',
++ 'pretty_name': 'openSUSE Leap 42.1 (x86_64)',
++ 'version': '42.1',
++ 'pretty_version': '42.1',
++ 'best_version': '42.1',
++ 'like': 'suse',
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_raspbian7_os_release(self):
++ desired_outcome = {
++ 'id': 'raspbian',
++ 'name': 'Raspbian GNU/Linux',
++ 'pretty_name': 'Raspbian GNU/Linux 7 (wheezy)',
++ 'version': '7',
++ 'pretty_version': '7 (wheezy)',
++ 'best_version': '7',
++ 'like': 'debian',
++ 'codename': 'wheezy'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_raspbian8_os_release(self):
++ desired_outcome = {
++ 'id': 'raspbian',
++ 'name': 'Raspbian GNU/Linux',
++ 'pretty_name': 'Raspbian GNU/Linux 8 (jessie)',
++ 'version': '8',
++ 'pretty_version': '8 (jessie)',
++ 'best_version': '8',
++ 'like': 'debian',
++ 'codename': 'jessie'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_rhel7_os_release(self):
++ desired_outcome = {
++ 'id': 'rhel',
++ 'name': 'Red Hat Enterprise Linux Server',
++ 'pretty_name': 'Red Hat Enterprise Linux Server 7.0 (Maipo)',
++ 'version': '7.0',
++ 'pretty_version': '7.0 (Maipo)',
++ 'best_version': '7.0',
++ 'like': 'fedora',
++ 'codename': 'Maipo'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_slackware14_os_release(self):
++ desired_outcome = {
++ 'id': 'slackware',
++ 'name': 'Slackware',
++ 'pretty_name': 'Slackware 14.1',
++ 'version': '14.1',
++ 'pretty_version': '14.1',
++ 'best_version': '14.1'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_sles12_os_release(self):
++ desired_outcome = {
++ 'id': 'sles',
++ 'name': 'SLES',
++ 'pretty_name': 'SUSE Linux Enterprise Server 12 SP1',
++ 'version': '12.1',
++ 'pretty_version': '12.1',
++ 'best_version': '12.1'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_ubuntu14_os_release(self):
++ desired_outcome = {
++ 'id': 'ubuntu',
++ 'name': 'Ubuntu',
++ 'pretty_name': 'Ubuntu 14.04.3 LTS',
++ 'version': '14.04',
++ 'pretty_version': '14.04 (Trusty Tahr)',
++ 'best_version': '14.04.3',
++ 'like': 'debian',
++ 'codename': 'Trusty Tahr'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_ubuntu16_os_release(self):
++ desired_outcome = {
++ 'id': 'ubuntu',
++ 'name': 'Ubuntu',
++ 'pretty_name': 'Ubuntu 16.04.1 LTS',
++ 'version': '16.04',
++ 'pretty_version': '16.04 (xenial)',
++ 'best_version': '16.04.1',
++ 'like': 'debian',
++ 'codename': 'xenial'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_amazon2016_os_release(self):
++ desired_outcome = {
++ 'id': 'amzn',
++ 'name': 'Amazon Linux AMI',
++ 'pretty_name': 'Amazon Linux AMI 2016.03',
++ 'version': '2016.03',
++ 'pretty_version': '2016.03',
++ 'best_version': '2016.03',
++ 'like': 'rhel fedora'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_scientific7_os_release(self):
++ desired_outcome = {
++ 'id': 'rhel',
++ 'name': 'Scientific Linux',
++ 'pretty_name': 'Scientific Linux 7.2 (Nitrogen)',
++ 'version': '7.2',
++ 'pretty_version': '7.2 (Nitrogen)',
++ 'best_version': '7.2',
++ 'like': 'fedora',
++ 'codename': 'Nitrogen'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_gentoo_os_release(self):
++ desired_outcome = {
++ 'id': 'gentoo',
++ 'name': 'Gentoo',
++ 'pretty_name': 'Gentoo/Linux',
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_openelec6_os_release(self):
++ desired_outcome = {
++ 'id': 'openelec',
++ 'name': 'OpenELEC',
++ 'pretty_name': 'OpenELEC (official) - Version: 6.0.3',
++ 'version': '6.0',
++ 'pretty_version': '6.0',
++ 'best_version': '6.0.3',
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_cloudlinux7_os_release(self):
++ desired_outcome = {
++ 'id': 'cloudlinux',
++ 'codename': 'Yury Malyshev',
++ 'name': 'CloudLinux',
++ 'pretty_name': 'CloudLinux 7.3 (Yury Malyshev)',
++ 'like': 'rhel fedora centos',
++ 'version': '7.3',
++ 'pretty_version': '7.3 (Yury Malyshev)',
++ 'best_version': '7.3',
++ 'major_version': '7',
++ 'minor_version': '3'
++ }
++ self._test_outcome(desired_outcome)
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestLSBRelease(DistroTestCase):
++
++ def setup_method(self, test_method):
++ super(TestLSBRelease, self).setup_method(test_method)
++ dist = test_method.__name__.split('_')[1]
++ self._setup_for_distro(os.path.join(DISTROS_DIR, dist))
++ self.distro = distro.LinuxDistribution(True, 'non', 'non')
++
++ def _test_outcome(self, outcome):
++ assert self.distro.id() == outcome.get('id', '')
++ assert self.distro.name() == outcome.get('name', '')
++ assert self.distro.name(pretty=True) == outcome.get('pretty_name', '')
++ assert self.distro.version() == outcome.get('version', '')
++ assert self.distro.version(pretty=True) == \
++ outcome.get('pretty_version', '')
++ assert self.distro.version(best=True) == \
++ outcome.get('best_version', '')
++ assert self.distro.like() == outcome.get('like', '')
++ assert self.distro.codename() == outcome.get('codename', '')
++
++ def test_linuxmint17_lsb_release(self):
++ desired_outcome = {
++ 'id': 'linuxmint',
++ 'name': 'LinuxMint',
++ 'pretty_name': 'Linux Mint 17.3 Rosa',
++ 'version': '17.3',
++ 'pretty_version': '17.3 (rosa)',
++ 'best_version': '17.3',
++ 'codename': 'rosa'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_manjaro1512_lsb_release(self):
++ self._test_outcome({
++ 'id': 'manjarolinux',
++ 'name': 'ManjaroLinux',
++ 'pretty_name': 'Manjaro Linux',
++ 'version': '15.12',
++ 'pretty_version': '15.12 (Capella)',
++ 'best_version': '15.12',
++ 'codename': 'Capella'
++ })
++
++ # @pytest.mark.xfail
++ # def test_openelec6_lsb_release(self):
++ # # TODO: This should be fixed as part of #109 when dealing
++ # # with distro inconsistencies
++ # desired_outcome = {
++ # 'id': 'openelec',
++ # 'name': 'OpenELEC',
++ # 'pretty_name': 'OpenELEC (official) - Version: 6.0.3',
++ # 'version': '6.0.3',
++ # 'pretty_version': '6.0.3',
++ # 'best_version': '6.0.3',
++ # }
++ # self._test_outcome(desired_outcome)
++
++ def test_openbsd62_uname(self):
++ self._test_outcome({
++ 'id': 'openbsd',
++ 'name': 'OpenBSD',
++ 'version': '6.2',
++ 'pretty_name': 'OpenBSD 6.2',
++ 'pretty_version': '6.2',
++ 'best_version': '6.2'
++ })
++
++ def test_netbsd711_uname(self):
++ self._test_outcome({
++ 'id': 'netbsd',
++ 'name': 'NetBSD',
++ 'version': '7.1.1',
++ 'pretty_name': 'NetBSD 7.1.1',
++ 'pretty_version': '7.1.1',
++ 'best_version': '7.1.1'
++ })
++
++ def test_freebsd111_uname(self):
++ self._test_outcome({
++ 'id': 'freebsd',
++ 'name': 'FreeBSD',
++ 'version': '11.1',
++ 'pretty_name': 'FreeBSD 11.1',
++ 'pretty_version': '11.1',
++ 'best_version': '11.1'
++ })
++
++ def test_ubuntu14normal_lsb_release(self):
++ self._setup_for_distro(os.path.join(TESTDISTROS, 'lsb',
++ 'ubuntu14_normal'))
++
++ self.distro = distro.LinuxDistribution(True, 'non', 'non')
++
++ desired_outcome = {
++ 'id': 'ubuntu',
++ 'name': 'Ubuntu',
++ 'pretty_name': 'Ubuntu 14.04.3 LTS',
++ 'version': '14.04',
++ 'pretty_version': '14.04 (trusty)',
++ 'best_version': '14.04.3',
++ 'codename': 'trusty'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_ubuntu14nomodules_lsb_release(self):
++ self._setup_for_distro(os.path.join(TESTDISTROS, 'lsb',
++ 'ubuntu14_nomodules'))
++
++ self.distro = distro.LinuxDistribution(True, 'non', 'non')
++
++ desired_outcome = {
++ 'id': 'ubuntu',
++ 'name': 'Ubuntu',
++ 'pretty_name': 'Ubuntu 14.04.3 LTS',
++ 'version': '14.04',
++ 'pretty_version': '14.04 (trusty)',
++ 'best_version': '14.04.3',
++ 'codename': 'trusty'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_trailingblanks_lsb_release(self):
++ self._setup_for_distro(os.path.join(TESTDISTROS, 'lsb',
++ 'ubuntu14_trailingblanks'))
++
++ self.distro = distro.LinuxDistribution(True, 'non', 'non')
++
++ desired_outcome = {
++ 'id': 'ubuntu',
++ 'name': 'Ubuntu',
++ 'pretty_name': 'Ubuntu 14.04.3 LTS',
++ 'version': '14.04',
++ 'pretty_version': '14.04 (trusty)',
++ 'best_version': '14.04.3',
++ 'codename': 'trusty'
++ }
++ self._test_outcome(desired_outcome)
++
++ @pytest.mark.parametrize('errnum', ('001', '002', '126', '130', '255'))
++ def test_lsb_release_error_level(self, errnum):
++ self._setup_for_distro(os.path.join(
++ TESTDISTROS, 'lsb', 'lsb_rc{0}'.format(errnum)))
++ with pytest.raises(subprocess.CalledProcessError) as excinfo:
++ distro.LinuxDistribution(True, 'non', 'non')._lsb_release_info
++ assert excinfo.value.returncode == int(errnum)
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestSpecialRelease(DistroTestCase):
++ def _test_outcome(self, outcome):
++ assert self.distro.id() == outcome.get('id', '')
++ assert self.distro.name() == outcome.get('name', '')
++ assert self.distro.name(pretty=True) == outcome.get('pretty_name', '')
++ assert self.distro.version() == outcome.get('version', '')
++ assert self.distro.version(pretty=True) == \
++ outcome.get('pretty_version', '')
++ assert self.distro.version(best=True) == \
++ outcome.get('best_version', '')
++ assert self.distro.like() == outcome.get('like', '')
++ assert self.distro.codename() == outcome.get('codename', '')
++ assert self.distro.major_version() == outcome.get('major_version', '')
++ assert self.distro.minor_version() == outcome.get('minor_version', '')
++ assert self.distro.build_number() == outcome.get('build_number', '')
++
++ def test_empty_release(self):
++ distro_release = os.path.join(SPECIAL, 'empty-release')
++ self.distro = distro.LinuxDistribution(False, 'non', distro_release)
++
++ desired_outcome = {
++ 'id': 'empty'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_unknowndistro_release(self):
++ self._setup_for_distro(os.path.join(TESTDISTROS, 'distro',
++ 'unknowndistro'))
++
++ self.distro = distro.LinuxDistribution()
++
++ desired_outcome = {
++ 'id': 'unknowndistro',
++ 'name': 'Unknown Distro',
++ 'pretty_name': 'Unknown Distro 1.0 (Unknown Codename)',
++ 'version': '1.0',
++ 'pretty_version': '1.0 (Unknown Codename)',
++ 'best_version': '1.0',
++ 'codename': 'Unknown Codename',
++ 'major_version': '1',
++ 'minor_version': '0'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_bad_uname(self):
++ self._setup_for_distro(os.path.join(TESTDISTROS, 'distro',
++ 'baduname'))
++ self.distro = distro.LinuxDistribution()
++
++ assert self.distro.uname_attr('id') == ''
++ assert self.distro.uname_attr('name') == ''
++ assert self.distro.uname_attr('release') == ''
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestDistroRelease:
++
++ def _test_outcome(self,
++ outcome,
++ distro_name='',
++ version='',
++ release_file_id='',
++ release_file_suffix='release'):
++ release_file_id = release_file_id or distro_name
++ distro_release = os.path.join(
++ DISTROS_DIR, distro_name + version, 'etc', '{0}-{1}'.format(
++ release_file_id, release_file_suffix))
++ self.distro = distro.LinuxDistribution(False, 'non', distro_release)
++
++ assert self.distro.id() == outcome.get('id', '')
++ assert self.distro.name() == outcome.get('name', '')
++ assert self.distro.name(pretty=True) == outcome.get('pretty_name', '')
++ assert self.distro.version() == outcome.get('version', '')
++ assert self.distro.version(pretty=True) == \
++ outcome.get('pretty_version', '')
++ assert self.distro.version(best=True) == \
++ outcome.get('best_version', '')
++ assert self.distro.like() == outcome.get('like', '')
++ assert self.distro.codename() == outcome.get('codename', '')
++ assert self.distro.major_version() == outcome.get('major_version', '')
++ assert self.distro.minor_version() == outcome.get('minor_version', '')
++ assert self.distro.build_number() == outcome.get('build_number', '')
++
++ def test_arch_dist_release(self):
++ desired_outcome = {
++ 'id': 'arch'
++ }
++ self._test_outcome(desired_outcome, 'arch')
++
++ def test_centos5_dist_release(self):
++ desired_outcome = {
++ 'id': 'centos',
++ 'name': 'CentOS',
++ 'pretty_name': 'CentOS 5.11 (Final)',
++ 'version': '5.11',
++ 'pretty_version': '5.11 (Final)',
++ 'best_version': '5.11',
++ 'codename': 'Final',
++ 'major_version': '5',
++ 'minor_version': '11'
++ }
++ self._test_outcome(desired_outcome, 'centos', '5')
++
++ def test_centos7_dist_release(self):
++ desired_outcome = {
++ 'id': 'centos',
++ 'name': 'CentOS Linux',
++ 'pretty_name': 'CentOS Linux 7.1.1503 (Core)',
++ 'version': '7.1.1503',
++ 'pretty_version': '7.1.1503 (Core)',
++ 'best_version': '7.1.1503',
++ 'codename': 'Core',
++ 'major_version': '7',
++ 'minor_version': '1',
++ 'build_number': '1503'
++ }
++ self._test_outcome(desired_outcome, 'centos', '7')
++
++ def test_fedora19_dist_release(self):
++ desired_outcome = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'pretty_name': u'Fedora 19 (Schr\u00F6dinger\u2019s Cat)',
++ 'version': '19',
++ 'pretty_version': u'19 (Schr\u00F6dinger\u2019s Cat)',
++ 'best_version': '19',
++ 'codename': u'Schr\u00F6dinger\u2019s Cat',
++ 'major_version': '19'
++ }
++ self._test_outcome(desired_outcome, 'fedora', '19')
++
++ def test_fedora23_dist_release(self):
++ desired_outcome = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'pretty_name': 'Fedora 23 (Twenty Three)',
++ 'version': '23',
++ 'pretty_version': '23 (Twenty Three)',
++ 'best_version': '23',
++ 'codename': 'Twenty Three',
++ 'major_version': '23'
++ }
++ self._test_outcome(desired_outcome, 'fedora', '23')
++
++ def test_fedora30_dist_release(self):
++ desired_outcome = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'pretty_name': 'Fedora 30 (Thirty)',
++ 'version': '30',
++ 'pretty_version': '30 (Thirty)',
++ 'best_version': '30',
++ 'codename': 'Thirty',
++ 'major_version': '30'
++ }
++ self._test_outcome(desired_outcome, 'fedora', '30')
++
++ def test_gentoo_dist_release(self):
++ desired_outcome = {
++ 'id': 'gentoo',
++ 'name': 'Gentoo Base System',
++ 'pretty_name': 'Gentoo Base System 2.2',
++ 'version': '2.2',
++ 'pretty_version': '2.2',
++ 'best_version': '2.2',
++ 'major_version': '2',
++ 'minor_version': '2',
++ }
++ self._test_outcome(desired_outcome, 'gentoo')
++
++ def test_kvmibm1_dist_release(self):
++ desired_outcome = {
++ 'id': 'base',
++ 'name': 'KVM for IBM z Systems',
++ 'pretty_name': 'KVM for IBM z Systems 1.1.1 (Z)',
++ 'version': '1.1.1',
++ 'pretty_version': '1.1.1 (Z)',
++ 'best_version': '1.1.1',
++ 'codename': 'Z',
++ 'major_version': '1',
++ 'minor_version': '1',
++ 'build_number': '1'
++ }
++ self._test_outcome(desired_outcome, 'kvmibm', '1', 'base')
++
++ def test_mageia5_dist_release(self):
++ desired_outcome = {
++ 'id': 'mageia',
++ 'name': 'Mageia',
++ 'pretty_name': 'Mageia 5 (Official)',
++ 'version': '5',
++ 'pretty_version': '5 (Official)',
++ 'best_version': '5',
++ 'codename': 'Official',
++ 'major_version': '5'
++ }
++ self._test_outcome(desired_outcome, 'mageia', '5')
++
++ def test_manjaro1512_dist_release(self):
++ self._test_outcome({
++ 'id': 'manjaro',
++ 'name': 'Manjaro Linux',
++ 'pretty_name': 'Manjaro Linux',
++ 'version': '',
++ 'codename': ''
++ }, 'manjaro', '1512')
++
++ def test_opensuse42_dist_release(self):
++ desired_outcome = {
++ 'id': 'suse',
++ 'name': 'openSUSE',
++ 'pretty_name': 'openSUSE 42.1 (x86_64)',
++ 'version': '42.1',
++ 'pretty_version': '42.1 (x86_64)',
++ 'best_version': '42.1',
++ 'codename': 'x86_64',
++ 'major_version': '42',
++ 'minor_version': '1'
++ }
++ self._test_outcome(desired_outcome, 'opensuse', '42', 'SuSE')
++
++ def test_oracle7_dist_release(self):
++ desired_outcome = {
++ 'id': 'oracle',
++ 'name': 'Oracle Linux Server',
++ 'pretty_name': 'Oracle Linux Server 7.5',
++ 'version': '7.5',
++ 'pretty_version': '7.5',
++ 'best_version': '7.5',
++ 'major_version': '7',
++ 'minor_version': '5'
++ }
++ self._test_outcome(desired_outcome, 'oracle', '7')
++
++ def test_rhel6_dist_release(self):
++ desired_outcome = {
++ 'id': 'rhel',
++ 'name': 'Red Hat Enterprise Linux Server',
++ 'pretty_name': 'Red Hat Enterprise Linux Server 6.5 (Santiago)',
++ 'version': '6.5',
++ 'pretty_version': '6.5 (Santiago)',
++ 'best_version': '6.5',
++ 'codename': 'Santiago',
++ 'major_version': '6',
++ 'minor_version': '5'
++ }
++ self._test_outcome(desired_outcome, 'rhel', '6', 'redhat')
++
++ def test_rhel7_dist_release(self):
++ desired_outcome = {
++ 'id': 'rhel',
++ 'name': 'Red Hat Enterprise Linux Server',
++ 'pretty_name': 'Red Hat Enterprise Linux Server 7.0 (Maipo)',
++ 'version': '7.0',
++ 'pretty_version': '7.0 (Maipo)',
++ 'best_version': '7.0',
++ 'codename': 'Maipo',
++ 'major_version': '7',
++ 'minor_version': '0'
++ }
++ self._test_outcome(desired_outcome, 'rhel', '7', 'redhat')
++
++ def test_slackware14_dist_release(self):
++ desired_outcome = {
++ 'id': 'slackware',
++ 'name': 'Slackware',
++ 'pretty_name': 'Slackware 14.1',
++ 'version': '14.1',
++ 'pretty_version': '14.1',
++ 'best_version': '14.1',
++ 'major_version': '14',
++ 'minor_version': '1'
++ }
++ self._test_outcome(
++ desired_outcome,
++ 'slackware',
++ '14',
++ release_file_suffix='version')
++
++ def test_sles12_dist_release(self):
++ desired_outcome = {
++ 'id': 'suse',
++ 'name': 'SUSE Linux Enterprise Server',
++ 'pretty_name': 'SUSE Linux Enterprise Server 12 (s390x)',
++ 'version': '12',
++ 'pretty_version': '12 (s390x)',
++ 'best_version': '12',
++ 'major_version': '12',
++ 'codename': 's390x'
++ }
++ self._test_outcome(desired_outcome, 'sles', '12', 'SuSE')
++
++ def test_cloudlinux5_dist_release(self):
++ # Uses redhat-release only to get information.
++ # The id of 'rhel' can only be fixed with issue #109.
++ desired_outcome = {
++ 'id': 'cloudlinux',
++ 'codename': 'Vladislav Volkov',
++ 'name': 'CloudLinux Server',
++ 'pretty_name': 'CloudLinux Server 5.11 (Vladislav Volkov)',
++ 'version': '5.11',
++ 'pretty_version': '5.11 (Vladislav Volkov)',
++ 'best_version': '5.11',
++ 'major_version': '5',
++ 'minor_version': '11'
++ }
++ self._test_outcome(desired_outcome, 'cloudlinux', '5', 'redhat')
++
++ def test_cloudlinux6_dist_release(self):
++ # Same as above, only has redhat-release.
++ desired_outcome = {
++ 'id': 'cloudlinux',
++ 'codename': 'Oleg Makarov',
++ 'name': 'CloudLinux Server',
++ 'pretty_name': 'CloudLinux Server 6.8 (Oleg Makarov)',
++ 'version': '6.8',
++ 'pretty_version': '6.8 (Oleg Makarov)',
++ 'best_version': '6.8',
++ 'major_version': '6',
++ 'minor_version': '8'
++ }
++ self._test_outcome(desired_outcome, 'cloudlinux', '6', 'redhat')
++
++ def test_cloudlinux7_dist_release(self):
++ desired_outcome = {
++ 'id': 'cloudlinux',
++ 'codename': 'Yury Malyshev',
++ 'name': 'CloudLinux',
++ 'pretty_name': 'CloudLinux 7.3 (Yury Malyshev)',
++ 'version': '7.3',
++ 'pretty_version': '7.3 (Yury Malyshev)',
++ 'best_version': '7.3',
++ 'major_version': '7',
++ 'minor_version': '3'
++ }
++ self._test_outcome(desired_outcome, 'cloudlinux', '7', 'redhat')
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestOverall(DistroTestCase):
++ """Test a LinuxDistribution object created with default arguments.
++
++ The direct accessor functions on that object are tested (e.g. `id()`); they
++ implement the precedence between the different sources of information.
++
++ In addition, because the distro release file is searched when not
++ specified, the information resulting from the distro release file is also
++ tested. The LSB and os-release sources are not tested again, because their
++ test is already done in TestLSBRelease and TestOSRelease, and their
++ algorithm does not depend on whether or not the file is specified.
++
++ TODO: This class should have testcases for all distros that are claimed
++ to be reliably maintained w.r.t. to their ID (see `id()`). Testcases for
++ the following distros are still missing:
++ * `amazon` - Amazon Linux
++    * `gentoo` - Gentoo Linux
++ * `ibm_powerkvm` - IBM PowerKVM
++ * `parallels` - Parallels
++ * `pidora` - Pidora (Fedora remix for Raspberry Pi)
++ * `raspbian` - Raspbian
++ * `scientific` - Scientific Linux
++ * `xenserver` - XenServer
++ """
++
++ def setup_method(self, test_method):
++ super(TestOverall, self).setup_method(test_method)
++ dist = test_method.__name__.split('_')[1]
++ self._setup_for_distro(os.path.join(DISTROS_DIR, dist))
++ self.distro = distro.LinuxDistribution()
++
++ def _test_outcome(self, outcome):
++ assert self.distro.id() == outcome.get('id', '')
++ assert self.distro.name() == outcome.get('name', '')
++ assert self.distro.name(pretty=True) == outcome.get('pretty_name', '')
++ assert self.distro.version() == outcome.get('version', '')
++ assert self.distro.version(pretty=True) == \
++ outcome.get('pretty_version', '')
++ assert self.distro.version(best=True) == \
++ outcome.get('best_version', '')
++ assert self.distro.like() == outcome.get('like', '')
++ assert self.distro.codename() == outcome.get('codename', '')
++ assert self.distro.major_version() == outcome.get('major_version', '')
++ assert self.distro.minor_version() == outcome.get('minor_version', '')
++ assert self.distro.build_number() == outcome.get('build_number', '')
++
++ def _test_non_existing_release_file(self):
++ # Test the info from the searched distro release file
++ # does not have one.
++ assert self.distro.distro_release_file == ''
++ assert len(self.distro.distro_release_info()) == 0
++
++ def _test_release_file_info(self, filename, outcome):
++ # Test the info from the searched distro release file
++ assert os.path.basename(self.distro.distro_release_file) == filename
++ distro_info = self.distro.distro_release_info()
++ for key, value in outcome.items():
++ assert distro_info[key] == value
++ return distro_info
++
++ def test_arch_release(self):
++ desired_outcome = {
++ 'id': 'arch',
++ 'name': 'Arch Linux',
++ 'pretty_name': 'Arch Linux',
++ }
++ self._test_outcome(desired_outcome)
++
++ # Test the info from the searched distro release file
++ # Does not have one; The empty /etc/arch-release file is not
++ # considered a valid distro release file:
++ self._test_non_existing_release_file()
++
++ def test_centos5_release(self):
++ desired_outcome = {
++ 'id': 'centos',
++ 'name': 'CentOS',
++ 'pretty_name': 'CentOS 5.11 (Final)',
++ 'version': '5.11',
++ 'pretty_version': '5.11 (Final)',
++ 'best_version': '5.11',
++ 'codename': 'Final',
++ 'major_version': '5',
++ 'minor_version': '11'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'centos',
++ 'name': 'CentOS',
++ 'version_id': '5.11',
++ 'codename': 'Final'
++ }
++ self._test_release_file_info('centos-release', desired_info)
++
++ def test_centos7_release(self):
++ desired_outcome = {
++ 'id': 'centos',
++ 'name': 'CentOS Linux',
++ 'pretty_name': 'CentOS Linux 7 (Core)',
++ 'version': '7',
++ 'pretty_version': '7 (Core)',
++ 'best_version': '7.1.1503',
++ 'like': 'rhel fedora',
++ 'codename': 'Core',
++ 'major_version': '7'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'centos',
++ 'name': 'CentOS Linux',
++ 'version_id': '7.1.1503',
++ 'codename': 'Core'
++ }
++ self._test_release_file_info('centos-release', desired_info)
++
++ def test_coreos_release(self):
++ desired_outcome = {
++ 'id': 'coreos',
++ 'name': 'CoreOS',
++ 'pretty_name': 'CoreOS 899.15.0',
++ 'version': '899.15.0',
++ 'pretty_version': '899.15.0',
++ 'best_version': '899.15.0',
++ 'major_version': '899',
++ 'minor_version': '15',
++ 'build_number': '0'
++ }
++ self._test_outcome(desired_outcome)
++ self._test_non_existing_release_file()
++
++ def test_debian8_release(self):
++ desired_outcome = {
++ 'id': 'debian',
++ 'name': 'Debian GNU/Linux',
++ 'pretty_name': 'Debian GNU/Linux 8 (jessie)',
++ 'version': '8',
++ 'pretty_version': '8 (jessie)',
++ 'best_version': '8.2',
++ 'codename': 'jessie',
++ 'major_version': '8'
++ }
++ self._test_outcome(desired_outcome)
++ self._test_non_existing_release_file()
++
++ def test_exherbo_release(self):
++ desired_outcome = {
++ 'id': 'exherbo',
++ 'name': 'Exherbo',
++ 'pretty_name': 'Exherbo Linux',
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_fedora19_release(self):
++ desired_outcome = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'pretty_name': u'Fedora 19 (Schr\u00F6dinger\u2019s Cat)',
++ 'version': '19',
++ 'pretty_version': u'19 (Schr\u00F6dinger\u2019s Cat)',
++ 'best_version': '19',
++ 'codename': u'Schr\u00F6dinger\u2019s Cat',
++ 'major_version': '19'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'version_id': '19',
++ 'codename': u'Schr\u00F6dinger\u2019s Cat'
++ }
++ self._test_release_file_info('fedora-release', desired_info)
++
++ def test_fedora23_release(self):
++ desired_outcome = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'pretty_name': 'Fedora 23 (Twenty Three)',
++ 'version': '23',
++ 'pretty_version': '23 (Twenty Three)',
++ 'best_version': '23',
++ 'codename': 'Twenty Three',
++ 'major_version': '23'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'version_id': '23',
++ 'codename': 'Twenty Three'
++ }
++ self._test_release_file_info('fedora-release', desired_info)
++
++ def test_fedora30_release(self):
++ desired_outcome = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'pretty_name': 'Fedora 30 (Thirty)',
++ 'version': '30',
++ 'pretty_version': '30',
++ 'best_version': '30',
++ 'codename': '',
++ 'major_version': '30'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'fedora',
++ 'name': 'Fedora',
++ 'version_id': '30',
++ 'codename': 'Thirty'
++ }
++ self._test_release_file_info('fedora-release', desired_info)
++
++ def test_kvmibm1_release(self):
++ desired_outcome = {
++ 'id': 'kvmibm',
++ 'name': 'KVM for IBM z Systems',
++ 'pretty_name': 'KVM for IBM z Systems 1.1.1 (Z)',
++ 'version': '1.1.1',
++ 'pretty_version': '1.1.1 (Z)',
++ 'best_version': '1.1.1',
++ 'like': 'rhel fedora',
++ 'codename': 'Z',
++ 'major_version': '1',
++ 'minor_version': '1',
++ 'build_number': '1'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'base',
++ 'name': 'KVM for IBM z Systems',
++ 'version_id': '1.1.1',
++ 'codename': 'Z'
++ }
++ self._test_release_file_info('base-release', desired_info)
++
++ def test_linuxmint17_release(self):
++ desired_outcome = {
++ 'id': 'ubuntu',
++ 'name': 'Ubuntu',
++ 'pretty_name': 'Ubuntu 14.04.3 LTS',
++ 'version': '14.04',
++ 'pretty_version': '14.04 (Trusty Tahr)',
++ 'best_version': '14.04.3',
++ 'like': 'debian',
++ 'codename': 'Trusty Tahr',
++ 'major_version': '14',
++ 'minor_version': '04'
++ }
++ self._test_outcome(desired_outcome)
++ self._test_non_existing_release_file()
++
++ def test_mageia5_release(self):
++ desired_outcome = {
++ 'id': 'mageia',
++ 'name': 'Mageia',
++ 'pretty_name': 'Mageia 5',
++ 'version': '5',
++ 'pretty_version': '5 (thornicroft)',
++ 'best_version': '5',
++ 'like': 'mandriva fedora',
++ # TODO: Codename differs between distro release and lsb_release.
++ 'codename': 'thornicroft',
++ 'major_version': '5'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'mageia',
++ 'name': 'Mageia',
++ 'version_id': '5',
++ 'codename': 'Official'
++ }
++ self._test_release_file_info('mageia-release', desired_info)
++
++ def test_manjaro1512_release(self):
++ self._test_outcome({
++ 'id': 'manjaro',
++ 'name': 'Manjaro Linux',
++ 'pretty_name': 'Manjaro Linux',
++ 'version': '15.12',
++ 'pretty_version': '15.12 (Capella)',
++ 'best_version': '15.12',
++ 'major_version': '15',
++ 'minor_version': '12',
++ 'codename': 'Capella'
++ })
++
++ self._test_release_file_info(
++ 'manjaro-release',
++ {'id': 'manjaro',
++ 'name': 'Manjaro Linux'})
++
++ def test_opensuse42_release(self):
++ desired_outcome = {
++ 'id': 'opensuse',
++ 'name': 'openSUSE Leap',
++ 'pretty_name': 'openSUSE Leap 42.1 (x86_64)',
++ 'version': '42.1',
++ 'pretty_version': '42.1 (x86_64)',
++ 'best_version': '42.1',
++ 'like': 'suse',
++ 'codename': 'x86_64',
++ 'major_version': '42',
++ 'minor_version': '1'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'SuSE',
++ 'name': 'openSUSE',
++ 'version_id': '42.1',
++ 'codename': 'x86_64'
++ }
++ self._test_release_file_info('SuSE-release', desired_info)
++
++ def test_oracle7_release(self):
++ desired_outcome = {
++ 'id': 'oracle',
++ 'name': 'Oracle Linux Server',
++ 'pretty_name': 'Oracle Linux Server 7.5',
++ 'version': '7.5',
++ 'pretty_version': '7.5',
++ 'best_version': '7.5',
++ 'major_version': '7',
++ 'minor_version': '5'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'oracle',
++ 'name': 'Oracle Linux Server',
++ 'version_id': '7.5',
++ }
++ distro_info = self._test_release_file_info(
++ 'oracle-release', desired_info)
++ assert 'codename' not in distro_info
++
++ def test_raspbian7_release(self):
++ desired_outcome = {
++ 'id': 'raspbian',
++ 'name': 'Raspbian GNU/Linux',
++ 'pretty_name': 'Raspbian GNU/Linux 7 (wheezy)',
++ 'version': '7',
++ 'pretty_version': '7 (wheezy)',
++ 'best_version': '7',
++ 'like': 'debian',
++ 'codename': 'wheezy',
++ 'major_version': '7',
++ }
++ self._test_outcome(desired_outcome)
++ self._test_non_existing_release_file()
++
++ def test_raspbian8_release(self):
++ desired_outcome = {
++ 'id': 'raspbian',
++ 'name': 'Raspbian GNU/Linux',
++ 'pretty_name': 'Raspbian GNU/Linux 8 (jessie)',
++ 'version': '8',
++ 'pretty_version': '8 (jessie)',
++ 'best_version': '8',
++ 'like': 'debian',
++ 'codename': 'jessie',
++ 'major_version': '8',
++ }
++ self._test_outcome(desired_outcome)
++ self._test_non_existing_release_file()
++
++ def test_rhel5_release(self):
++ desired_outcome = {
++ 'id': 'rhel',
++ 'name': 'Red Hat Enterprise Linux Server',
++ 'pretty_name': 'Red Hat Enterprise Linux Server 5.11 (Tikanga)',
++ 'version': '5.11',
++ 'pretty_version': '5.11 (Tikanga)',
++ 'best_version': '5.11',
++ 'codename': 'Tikanga',
++ 'major_version': '5',
++ 'minor_version': '11'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'redhat',
++ 'name': 'Red Hat Enterprise Linux Server',
++ 'version_id': '5.11',
++ 'codename': 'Tikanga'
++ }
++ self._test_release_file_info('redhat-release', desired_info)
++
++ def test_rhel6_release(self):
++ desired_outcome = {
++ 'id': 'rhel',
++ 'name': 'Red Hat Enterprise Linux Server',
++ 'pretty_name': 'Red Hat Enterprise Linux Server 6.5 (Santiago)',
++ 'version': '6.5',
++ 'pretty_version': '6.5 (Santiago)',
++ 'best_version': '6.5',
++ 'codename': 'Santiago',
++ 'major_version': '6',
++ 'minor_version': '5'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'redhat',
++ 'name': 'Red Hat Enterprise Linux Server',
++ 'version_id': '6.5',
++ 'codename': 'Santiago'
++ }
++ self._test_release_file_info('redhat-release', desired_info)
++
++ def test_rhel7_release(self):
++ desired_outcome = {
++ 'id': 'rhel',
++ 'name': 'Red Hat Enterprise Linux Server',
++ 'pretty_name': 'Red Hat Enterprise Linux Server 7.0 (Maipo)',
++ 'version': '7.0',
++ 'pretty_version': '7.0 (Maipo)',
++ 'best_version': '7.0',
++ 'like': 'fedora',
++ 'codename': 'Maipo',
++ 'major_version': '7',
++ 'minor_version': '0'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'redhat',
++ 'name': 'Red Hat Enterprise Linux Server',
++ 'version_id': '7.0',
++ 'codename': 'Maipo'
++ }
++ self._test_release_file_info('redhat-release', desired_info)
++
++ def test_slackware14_release(self):
++ desired_outcome = {
++ 'id': 'slackware',
++ 'name': 'Slackware',
++ 'pretty_name': 'Slackware 14.1',
++ 'version': '14.1',
++ 'pretty_version': '14.1',
++ 'best_version': '14.1',
++ 'major_version': '14',
++ 'minor_version': '1'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'slackware',
++ 'name': 'Slackware',
++ 'version_id': '14.1',
++ }
++ distro_info = self._test_release_file_info(
++ 'slackware-version', desired_info)
++ assert 'codename' not in distro_info
++
++ def test_sles12_release(self):
++ desired_outcome = {
++ 'id': 'sles',
++ 'name': 'SLES',
++ 'pretty_name': 'SUSE Linux Enterprise Server 12 SP1',
++ 'version': '12.1',
++ 'pretty_version': '12.1 (n/a)',
++ 'best_version': '12.1',
++ 'codename': 'n/a',
++ 'major_version': '12',
++ 'minor_version': '1'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'SuSE',
++ 'name': 'SUSE Linux Enterprise Server',
++ 'version_id': '12',
++ 'codename': 's390x'
++ }
++ self._test_release_file_info('SuSE-release', desired_info)
++
++ def test_ubuntu14_release(self):
++ desired_outcome = {
++ 'id': 'ubuntu',
++ 'name': 'Ubuntu',
++ 'pretty_name': 'Ubuntu 14.04.3 LTS',
++ 'version': '14.04',
++ 'pretty_version': '14.04 (Trusty Tahr)',
++ 'best_version': '14.04.3',
++ 'like': 'debian',
++ 'codename': 'Trusty Tahr',
++ 'major_version': '14',
++ 'minor_version': '04'
++ }
++ self._test_outcome(desired_outcome)
++
++ # Test the info from the searched distro release file
++ # Does not have one; /etc/debian_version is not considered a distro
++ # release file:
++ self._test_non_existing_release_file()
++
++ def test_ubuntu16_release(self):
++ desired_outcome = {
++ 'id': 'ubuntu',
++ 'name': 'Ubuntu',
++ 'pretty_name': 'Ubuntu 16.04.1 LTS',
++ 'version': '16.04',
++ 'pretty_version': '16.04 (xenial)',
++ 'best_version': '16.04.1',
++ 'like': 'debian',
++ 'codename': 'xenial',
++ 'major_version': '16',
++ 'minor_version': '04'
++ }
++ self._test_outcome(desired_outcome)
++
++ # Test the info from the searched distro release file
++ # Does not have one; /etc/debian_version is not considered a distro
++ # release file:
++ self._test_non_existing_release_file()
++
++ def test_amazon2016_release(self):
++ desired_outcome = {
++ 'id': 'amzn',
++ 'name': 'Amazon Linux AMI',
++ 'pretty_name': 'Amazon Linux AMI 2016.03',
++ 'version': '2016.03',
++ 'pretty_version': '2016.03',
++ 'best_version': '2016.03',
++ 'like': 'rhel fedora',
++ 'major_version': '2016',
++ 'minor_version': '03'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_amazon2014_release(self):
++ # Amazon Linux 2014 only contains a system-release file.
++ # distro doesn't currently handle it.
++ desired_outcome = {}
++ self._test_outcome(desired_outcome)
++
++ def test_scientific6_release(self):
++ desired_outcome = {
++ 'id': 'rhel',
++ 'name': 'Scientific Linux',
++ 'pretty_name': 'Scientific Linux 6.4 (Carbon)',
++ 'version': '6.4',
++ 'pretty_version': '6.4 (Carbon)',
++ 'best_version': '6.4',
++ 'codename': 'Carbon',
++ 'major_version': '6',
++ 'minor_version': '4',
++
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'redhat',
++ 'name': 'Scientific Linux',
++ 'version_id': '6.4',
++ 'codename': 'Carbon'
++ }
++ self._test_release_file_info('redhat-release', desired_info)
++
++ def test_scientific7_release(self):
++ desired_outcome = {
++ 'id': 'rhel',
++ 'name': 'Scientific Linux',
++ 'pretty_name': 'Scientific Linux 7.2 (Nitrogen)',
++ 'version': '7.2',
++ 'pretty_version': '7.2 (Nitrogen)',
++ 'best_version': '7.2',
++ 'like': 'fedora',
++ 'codename': 'Nitrogen',
++ 'major_version': '7',
++ 'minor_version': '2',
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'redhat',
++ 'name': 'Scientific Linux',
++ 'version_id': '7.2',
++ 'codename': 'Nitrogen'
++ }
++ self._test_release_file_info('redhat-release', desired_info)
++
++ def test_gentoo_release(self):
++ desired_outcome = {
++ 'id': 'gentoo',
++ 'name': 'Gentoo',
++ 'pretty_name': 'Gentoo/Linux',
++ 'version': '2.2',
++ 'pretty_version': '2.2',
++ 'best_version': '2.2',
++ 'major_version': '2',
++ 'minor_version': '2',
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'gentoo',
++ 'name': 'Gentoo Base System',
++ 'version_id': '2.2',
++ }
++ self._test_release_file_info('gentoo-release', desired_info)
++
++ def test_openelec6_release(self):
++ desired_outcome = {
++ 'id': 'openelec',
++ 'name': 'OpenELEC',
++ 'pretty_name': 'OpenELEC (official) - Version: 6.0.3',
++ 'version': '6.0',
++ 'pretty_version': '6.0',
++ 'best_version': '6.0.3',
++ 'major_version': '6',
++ 'minor_version': '0',
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_mandriva2011_release(self):
++ desired_outcome = {
++ 'id': 'mandrivalinux',
++ 'name': 'MandrivaLinux',
++ 'pretty_name': 'Mandriva Linux 2011.0',
++ 'version': '2011.0',
++ 'pretty_version': '2011.0 (turtle)',
++ 'best_version': '2011.0',
++ 'major_version': '2011',
++ 'minor_version': '0',
++ 'codename': 'turtle'
++ }
++ self._test_outcome(desired_outcome)
++
++ desired_info = {
++ 'id': 'mandrake',
++ 'name': 'Mandriva Linux',
++ 'version_id': '2011.0',
++ }
++ self._test_release_file_info('mandrake-release', desired_info)
++
++ def test_cloudlinux5_release(self):
++ # Uses redhat-release only to get information.
++ # The id of 'rhel' can only be fixed with issue #109.
++ desired_outcome = {
++ 'id': 'cloudlinux',
++ 'codename': 'Vladislav Volkov',
++ 'name': 'CloudLinux Server',
++ 'pretty_name': 'CloudLinux Server 5.11 (Vladislav Volkov)',
++ 'version': '5.11',
++ 'pretty_version': '5.11 (Vladislav Volkov)',
++ 'best_version': '5.11',
++ 'major_version': '5',
++ 'minor_version': '11'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_cloudlinux6_release(self):
++ # Same as above, only has redhat-release.
++ desired_outcome = {
++ 'id': 'cloudlinux',
++ 'codename': 'Oleg Makarov',
++ 'name': 'CloudLinux Server',
++ 'pretty_name': 'CloudLinux Server 6.8 (Oleg Makarov)',
++ 'version': '6.8',
++ 'pretty_version': '6.8 (Oleg Makarov)',
++ 'best_version': '6.8',
++ 'major_version': '6',
++ 'minor_version': '8'
++ }
++ self._test_outcome(desired_outcome)
++
++ def test_cloudlinux7_release(self):
++ desired_outcome = {
++ 'id': 'cloudlinux',
++ 'codename': 'Yury Malyshev',
++ 'name': 'CloudLinux',
++ 'pretty_name': 'CloudLinux 7.3 (Yury Malyshev)',
++ 'like': 'rhel fedora centos',
++ 'version': '7.3',
++ 'pretty_version': '7.3 (Yury Malyshev)',
++ 'best_version': '7.3',
++ 'major_version': '7',
++ 'minor_version': '3'
++ }
++ self._test_outcome(desired_outcome)
++
++
++def _bad_os_listdir(path='.'):
++ """ This function is used by TestOverallWithEtcNotReadable to simulate
++ a folder that cannot be called with os.listdir() but files are still
++ readable. Forces distro to guess which *-release files are available. """
++ raise OSError()
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestOverallWithEtcNotReadable(TestOverall):
++ def setup_method(self, test_method):
++ self._old_listdir = os.listdir
++ os.listdir = _bad_os_listdir
++ super(TestOverallWithEtcNotReadable, self).setup_method(test_method)
++
++ def teardown_method(self, test_method):
++ super(TestOverallWithEtcNotReadable, self).teardown_method(test_method)
++ if os.listdir is _bad_os_listdir:
++ os.listdir = self._old_listdir
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestGetAttr(DistroTestCase):
++ """Test the consistency between the results of
++ `{source}_release_attr()` and `{source}_release_info()` for all
++ distros in `DISTROS`.
++ """
++
++ def _test_attr(self, info_method, attr_method):
++ for dist in DISTROS:
++ self._setup_for_distro(os.path.join(DISTROS_DIR, dist))
++ _distro = distro.LinuxDistribution()
++ info = getattr(_distro, info_method)()
++ for key in info.keys():
++ try:
++ assert info[key] == getattr(_distro, attr_method)(key)
++ except AssertionError:
++ print("distro: {0}, key: {1}".format(dist, key))
++
++ def test_os_release_attr(self):
++ self._test_attr('os_release_info', 'os_release_attr')
++
++ def test_lsb_release_attr(self):
++ self._test_attr('lsb_release_info', 'lsb_release_attr')
++
++ def test_distro_release_attr(self):
++ self._test_attr('distro_release_info', 'distro_release_attr')
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestInfo(DistroTestCase):
++
++ def setup_method(self, test_method):
++ super(TestInfo, self).setup_method(test_method)
++ self.ubuntu14_os_release = os.path.join(
++ DISTROS_DIR, 'ubuntu14', 'etc', 'os-release')
++
++ def test_info(self):
++ _distro = distro.LinuxDistribution(
++ False, self.ubuntu14_os_release, 'non')
++
++ desired_info = {
++ 'id': 'ubuntu',
++ 'version': '14.04',
++ 'like': 'debian',
++ 'version_parts': {
++ 'major': '14',
++ 'minor': '04',
++ 'build_number': ''
++ },
++ 'codename': 'Trusty Tahr'
++ }
++
++ info = _distro.info()
++ assert info == desired_info
++
++ desired_info_diff = {
++ 'version': '14.04 (Trusty Tahr)'
++ }
++ desired_info.update(desired_info_diff)
++ info = _distro.info(pretty=True)
++ assert info == desired_info
++
++ desired_info_diff = {
++ 'version': '14.04.3',
++ 'version_parts': {
++ 'major': '14',
++ 'minor': '04',
++ 'build_number': '3'
++ }
++ }
++ desired_info.update(desired_info_diff)
++ info = _distro.info(best=True)
++ assert info == desired_info
++
++ desired_info_diff = {
++ 'version': '14.04.3 (Trusty Tahr)'
++ }
++ desired_info.update(desired_info_diff)
++ info = _distro.info(pretty=True, best=True)
++ assert info == desired_info
++
++ def test_none(self):
++
++ def _test_none(info):
++ assert info['id'] == ''
++ assert info['version'] == ''
++ assert info['like'] == ''
++ assert info['version_parts']['major'] == ''
++ assert info['version_parts']['minor'] == ''
++ assert info['version_parts']['build_number'] == ''
++ assert info['codename'] == ''
++
++ _distro = distro.LinuxDistribution(False, 'non', 'non')
++
++ info = _distro.info()
++ _test_none(info)
++
++ info = _distro.info(best=True)
++ _test_none(info)
++
++ info = _distro.info(pretty=True)
++ _test_none(info)
++
++ info = _distro.info(pretty=True, best=True)
++ _test_none(info)
++
++ def test_linux_distribution(self):
++ _distro = distro.LinuxDistribution(False, self.ubuntu14_os_release)
++ i = _distro.linux_distribution()
++ assert i == ('Ubuntu', '14.04', 'Trusty Tahr')
++
++ def test_linux_distribution_full_false(self):
++ _distro = distro.LinuxDistribution(False, self.ubuntu14_os_release)
++ i = _distro.linux_distribution(full_distribution_name=False)
++ assert i == ('ubuntu', '14.04', 'Trusty Tahr')
++
++ def test_all(self):
++ """Test info() by comparing its results with the results of specific
++ consolidated accessor functions.
++ """
++ def _test_all(info, best=False, pretty=False):
++ assert info['id'] == _distro.id()
++ assert info['version'] == _distro.version(pretty=pretty, best=best)
++ assert info['version_parts']['major'] == \
++ _distro.major_version(best=best)
++ assert info['version_parts']['minor'] == \
++ _distro.minor_version(best=best)
++ assert info['version_parts']['build_number'] == \
++ _distro.build_number(best=best)
++ assert info['like'] == _distro.like()
++ assert info['codename'] == _distro.codename()
++ assert len(info['version_parts']) == 3
++ assert len(info) == 5
++
++ for dist in DISTROS:
++ self._setup_for_distro(os.path.join(DISTROS_DIR, dist))
++
++ _distro = distro.LinuxDistribution()
++
++ info = _distro.info()
++ _test_all(info)
++
++ info = _distro.info(best=True)
++ _test_all(info, best=True)
++
++ info = _distro.info(pretty=True)
++ _test_all(info, pretty=True)
++
++ info = _distro.info(pretty=True, best=True)
++ _test_all(info, pretty=True, best=True)
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestOSReleaseParsing:
++ """Test the parsing of os-release files.
++ """
++
++ def setup_method(self, test_method):
++ self.distro = distro.LinuxDistribution(False, None, None)
++ self.distro.debug = True
++
++ def _get_props(self, input):
++ return self.distro._parse_os_release_content(StringIO(
++ input,
++ ))
++
++ def _test_zero_length_props(self, input):
++ props = self._get_props(input)
++ assert len(props) == 0
++
++ def _test_empty_value(self, input):
++ props = self._get_props(input)
++ assert props.get('key', None) == ''
++
++ def _test_parsed_value(self, input):
++ props = self._get_props(input)
++ assert props.get('key', None) == 'value'
++
++ def test_kv_01_empty_file(self):
++ self._test_zero_length_props('')
++
++ def test_kv_02_empty_line(self):
++ self._test_zero_length_props('\n')
++
++ def test_kv_03_empty_line_with_crlf(self):
++ self._test_zero_length_props('\r\n')
++
++ def test_kv_04_empty_line_with_just_cr(self):
++ self._test_zero_length_props('\r')
++
++ def test_kv_05_comment(self):
++ self._test_zero_length_props('# KEY=value\n')
++
++ def test_kv_06_empty_value(self):
++ self._test_empty_value('KEY=\n')
++
++ def test_kv_07_empty_value_single_quoted(self):
++ self._test_empty_value('KEY=\'\'\n')
++
++ def test_kv_08_empty_value_double_quoted(self):
++ self._test_empty_value('KEY=""\n')
++
++ def test_kv_09_word(self):
++ self._test_parsed_value('KEY=value\n')
++
++ def test_kv_10_word_no_newline(self):
++ self._test_parsed_value('KEY=value')
++
++ def test_kv_11_word_with_crlf(self):
++ self._test_parsed_value('KEY=value\r\n')
++
++ def test_kv_12_word_with_just_cr(self):
++ self._test_parsed_value('KEY=value\r')
++
++ def test_kv_13_word_with_multi_blanks(self):
++ self._test_empty_value('KEY= cmd \n')
++ # Note: Without quotes, this assigns the empty string, and 'cmd' is
++ # a separate token that is being ignored (it would be a command
++ # in the shell).
++
++ def test_kv_14_unquoted_words(self):
++ self._test_parsed_value('KEY=value cmd\n')
++
++ def test_kv_15_double_quoted_words(self):
++ props = self._get_props('KEY="a simple value" cmd\n')
++ assert props.get('key', None) == 'a simple value'
++
++ def test_kv_16_double_quoted_words_with_multi_blanks(self):
++ props = self._get_props('KEY=" a simple value "\n')
++ assert props.get('key', None) == ' a simple value '
++
++ def test_kv_17_double_quoted_word_with_single_quote(self):
++ props = self._get_props('KEY="it\'s value"\n')
++ assert props.get('key', None) == 'it\'s value'
++
++ def test_kv_18_double_quoted_word_with_double_quote(self):
++ props = self._get_props('KEY="a \\"bold\\" move"\n')
++ assert props.get('key', None) == 'a "bold" move'
++
++ def test_kv_19_single_quoted_words(self):
++ props = self._get_props('KEY=\'a simple value\'\n')
++ assert props.get('key', None) == 'a simple value'
++
++ def test_kv_20_single_quoted_words_with_multi_blanks(self):
++ props = self._get_props('KEY=\' a simple value \'\n')
++ assert props.get('key', None) == ' a simple value '
++
++ def test_kv_21_single_quoted_word_with_double_quote(self):
++ props = self._get_props('KEY=\'a "bold" move\'\n')
++ assert props.get('key', None) == 'a "bold" move'
++
++ def test_kv_22_quoted_unicode_wordchar(self):
++ # "wordchar" means it is in the shlex.wordchars variable.
++ props = self._get_props(u'KEY="wordchar: \u00CA (E accent grave)"\n')
++ assert props.get('key', None) == u'wordchar: \u00CA (E accent grave)'
++
++ def test_kv_23_quoted_unicode_non_wordchar(self):
++ # "non-wordchar" means it is not in the shlex.wordchars variable.
++ props = self._get_props(
++ u'KEY="non-wordchar: \u00A1 (inverted exclamation mark)"\n')
++ assert (props.get('key', None) ==
++ u'non-wordchar: \u00A1 (inverted exclamation mark)')
++
++ def test_kv_24_double_quoted_entire_single_quoted_word(self):
++ props = self._get_props('KEY="\'value\'"\n')
++ assert props.get('key', None) == "'value'"
++
++ def test_kv_25_single_quoted_entire_double_quoted_word(self):
++ props = self._get_props('KEY=\'"value"\'\n')
++ assert props.get('key', None) == '"value"'
++
++ def test_kv_26_double_quoted_multiline(self):
++ props = self.distro._parse_os_release_content(StringIO(
++ 'KEY="a multi\n'
++ 'line value"\n'
++ ))
++ assert props.get('key', None) == 'a multi\nline value'
++ # TODO: Find out why the result is not 'a multi line value'
++
++ def test_kv_27_double_quoted_multiline_2(self):
++ props = self._get_props('KEY=\' a simple value \'\n')
++ props = self.distro._parse_os_release_content(StringIO(
++ 'KEY="a multi\n'
++ 'line=value"\n'
++ ))
++ assert props.get('key', None) == 'a multi\nline=value'
++ # TODO: Find out why the result is not 'a multi line=value'
++
++ def test_kv_28_double_quoted_word_with_equal(self):
++ props = self._get_props('KEY="var=value"\n')
++ assert props.get('key', None) == 'var=value'
++
++ def test_kv_29_single_quoted_word_with_equal(self):
++ props = self._get_props('KEY=\'var=value\'\n')
++ assert props.get('key', None) == 'var=value'
++
++ def test_kx_01(self):
++ props = self.distro._parse_os_release_content(StringIO(
++ 'KEY1=value1\n'
++ 'KEY2="value 2"\n'
++ ))
++ assert props.get('key1', None) == 'value1'
++ assert props.get('key2', None) == 'value 2'
++
++ def test_kx_02(self):
++ props = self.distro._parse_os_release_content(StringIO(
++ '# KEY1=value1\n'
++ 'KEY2="value 2"\n'
++ ))
++ assert props.get('key1', None) is None
++ assert props.get('key2', None) == 'value 2'
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestGlobal:
++ """Test the global module-level functions, and default values of their
++ arguments.
++ """
++
++ def setup_method(self, test_method):
++ pass
++
++ def test_global(self):
++ # Because the module-level functions use the module-global
++ # LinuxDistribution instance, it would influence the tested
++ # code too much if we mocked that in order to use the distro
++ # specific release files. Instead, we let the functions use
++ # the release files of the distro this test runs on, and
++ # compare the result of the global functions with the result
++ # of the methods on the global LinuxDistribution object.
++
++ def _test_consistency(function, kwargs=None):
++ kwargs = kwargs or {}
++ method_result = getattr(MODULE_DISTRO, function)(**kwargs)
++ function_result = getattr(distro, function)(**kwargs)
++ assert method_result == function_result
++
++ kwargs = {'full_distribution_name': True}
++ _test_consistency('linux_distribution', kwargs)
++ kwargs = {'full_distribution_name': False}
++ _test_consistency('linux_distribution', kwargs)
++
++ kwargs = {'pretty': False}
++ _test_consistency('name', kwargs)
++ _test_consistency('version', kwargs)
++ _test_consistency('info', kwargs)
++
++ kwargs = {'pretty': True}
++ _test_consistency('name', kwargs)
++ _test_consistency('version', kwargs)
++ _test_consistency('info', kwargs)
++
++ kwargs = {'best': False}
++ _test_consistency('version', kwargs)
++ _test_consistency('version_parts', kwargs)
++ _test_consistency('major_version', kwargs)
++ _test_consistency('minor_version', kwargs)
++ _test_consistency('build_number', kwargs)
++ _test_consistency('info', kwargs)
++
++ kwargs = {'best': True}
++ _test_consistency('version', kwargs)
++ _test_consistency('version_parts', kwargs)
++ _test_consistency('major_version', kwargs)
++ _test_consistency('minor_version', kwargs)
++ _test_consistency('build_number', kwargs)
++ _test_consistency('info', kwargs)
++
++ _test_consistency('id')
++ _test_consistency('like')
++ _test_consistency('codename')
++ _test_consistency('info')
++
++ _test_consistency('os_release_info')
++ _test_consistency('lsb_release_info')
++ _test_consistency('distro_release_info')
++ _test_consistency('uname_info')
++
++ os_release_keys = [
++ 'name',
++ 'version',
++ 'id',
++ 'id_like',
++ 'pretty_name',
++ 'version_id',
++ 'codename',
++ ]
++ for key in os_release_keys:
++ _test_consistency('os_release_attr', {'attribute': key})
++
++ lsb_release_keys = [
++ 'distributor_id',
++ 'description',
++ 'release',
++ 'codename',
++ ]
++ for key in lsb_release_keys:
++ _test_consistency('lsb_release_attr', {'attribute': key})
++
++ distro_release_keys = [
++ 'id',
++ 'name',
++ 'version_id',
++ 'codename',
++ ]
++ for key in distro_release_keys:
++ _test_consistency('distro_release_attr', {'attribute': key})
++
++ uname_keys = [
++ 'id',
++ 'name',
++ 'release'
++ ]
++ for key in uname_keys:
++ _test_consistency('uname_attr', {'attribute': key})
++
++
++@pytest.mark.skipif(not IS_LINUX, reason='Irrelevant on non-linux')
++class TestRepr:
++ """Test the __repr__() method.
++ """
++
++ def test_repr(self):
++ # We test that the class name and the names of all instance attributes
++ # show up in the repr() string.
++ repr_str = repr(distro._distro)
++ assert "LinuxDistribution" in repr_str
++ for attr in MODULE_DISTRO.__dict__.keys():
++ assert attr + '=' in repr_str
+diff --git a/third_party/python/enum34/enum/doc/enum.pdf b/third_party/python/enum34/enum/doc/enum.pdf
+--- a/third_party/python/enum34/enum/doc/enum.pdf
++++ b/third_party/python/enum34/enum/doc/enum.pdf
+@@ -1,10 +1,10 @@
+ %PDF-1.4
+-%æ±äº¬ ReportLab Generated PDF document http://www.reportlab.com
++%“Œ‹ž ReportLab Generated PDF document http://www.reportlab.com
+ 1 0 obj
+ << /F1 2 0 R /F2 3 0 R /F3 4 0 R /F4 5 0 R /F5 8 0 R /F6 15 0 R >>
+ endobj
+ 2 0 obj
+ << /BaseFont /Helvetica /Encoding /WinAnsiEncoding /Name /F1 /Subtype /Type1 /Type /Font >>
+ endobj
+ 3 0 obj
+ << /BaseFont /Courier-Bold /Encoding /WinAnsiEncoding /Name /F2 /Subtype /Type1 /Type /Font >>
+diff --git a/third_party/python/psutil/psutil/_pslinux.py b/third_party/python/psutil/psutil/_pslinux.py
+--- a/third_party/python/psutil/psutil/_pslinux.py
++++ b/third_party/python/psutil/psutil/_pslinux.py
+@@ -1046,30 +1046,28 @@ def disk_io_counters():
+ # On Linux 2.4 each line has always 15 fields, e.g.:
+ # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8"
+ # On Linux 2.6+ each line *usually* has 14 fields, and the disk
+ # name is in another position, like this:
+ # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8"
+ # ...unless (Linux 2.6) the line refers to a partition instead
+ # of a disk, in which case the line has less fields (7):
+ # "3 1 hda1 8 8 8 8"
+- # 4.18+ has 4 fields added:
+- # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0"
+ # See:
+ # https://www.kernel.org/doc/Documentation/iostats.txt
+ # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
+ fields = line.split()
+ fields_len = len(fields)
+ if fields_len == 15:
+ # Linux 2.4
+ name = fields[3]
+ reads = int(fields[2])
+ (reads_merged, rbytes, rtime, writes, writes_merged,
+ wbytes, wtime, _, busy_time, _) = map(int, fields[4:14])
+- elif fields_len == 14 or fields_len == 18:
++ elif fields_len == 14:
+ # Linux 2.6+, line referring to a disk
+ name = fields[2]
+ (reads, reads_merged, rbytes, rtime, writes, writes_merged,
+ wbytes, wtime, _, busy_time, _) = map(int, fields[3:14])
+ elif fields_len == 7:
+ # Linux 2.6+, line referring to a partition
+ name = fields[2]
+ reads, rbytes, writes, wbytes = map(int, fields[3:])
+diff --git a/third_party/python/requirements.in b/third_party/python/requirements.in
+--- a/third_party/python/requirements.in
++++ b/third_party/python/requirements.in
+@@ -1,11 +1,12 @@
+ attrs==18.1.0
+ biplist==1.0.3
+ blessings==1.7
++distro==1.4.0
+ jsmin==2.1.0
+ json-e==2.7.0
+ mozilla-version==0.3.0
+ pathlib2==2.3.2
+ pip-tools==3.0.0
+ pipenv==2018.5.18
+ psutil==5.4.3
+ pytest==3.6.2
+diff --git a/third_party/python/requirements.txt b/third_party/python/requirements.txt
+--- a/third_party/python/requirements.txt
++++ b/third_party/python/requirements.txt
+@@ -14,16 +14,19 @@ blessings==1.7 \
+ certifi==2018.4.16 \
+ --hash=sha256:13e698f54293db9f89122b0581843a782ad0934a4fe0172d2a980ba77fc61bb7 \
+ --hash=sha256:9fa520c1bacfb634fa7af20a76bcbd3d5fb390481724c597da32c719a7dca4b0 \
+ # via pipenv
+ click==7.0 \
+ --hash=sha256:2335065e6395b9e67ca716de5f7526736bfa6ceead690adf616d925bdc622b13 \
+ --hash=sha256:5b94b49521f6456670fdb30cd82a4eca9412788a93fa6dd6df72c94d5a8ff2d7 \
+ # via pip-tools
++distro==1.4.0 \
++ --hash=sha256:362dde65d846d23baee4b5c058c8586f219b5a54be1cf5fc6ff55c4578392f57 \
++ --hash=sha256:eedf82a470ebe7d010f1872c17237c79ab04097948800029994fa458e52fb4b4
+ enum34==1.1.6 \
+ --hash=sha256:2d81cbbe0e73112bdfe6ef8576f2238f2ba27dd0d55752a776c41d38b7da2850 \
+ --hash=sha256:644837f692e5f550741432dd3f223bbb9852018674981b1664e5dc339387588a \
+ --hash=sha256:6bd0f6ad48ec2aa117d3d141940d484deccda84d4fcd884f5c3d93c23ecd8c79 \
+ --hash=sha256:8ad8c4783bf61ded74527bffb48ed9b54166685e4230386a9ed9b1279e2df5b1 \
+ # via mozilla-version
+ funcsigs==1.0.2 \
+ --hash=sha256:330cc27ccbf7f1e992e69fef78261dc7c6569012cf397db8d3de0234e6c937ca \
+
diff --git a/libre/iceweasel/firefox-70.0-update-bindgen.patch b/libre/iceweasel/firefox-70.0-update-bindgen.patch
new file mode 100644
index 000000000..804e56c0c
--- /dev/null
+++ b/libre/iceweasel/firefox-70.0-update-bindgen.patch
@@ -0,0 +1,23946 @@
+diff --git a/Cargo.lock b/Cargo.lock
+--- a/Cargo.lock
++++ b/Cargo.lock
+@@ -188,7 +188,7 @@
+ name = "baldrdash"
+ version = "0.1.0"
+ dependencies = [
+- "bindgen 0.51.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "bindgen 0.51.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cranelift-codegen 0.40.0 (git+https://github.com/CraneStation/Cranelift?rev=fc88520b88bcaad4e4a92f28a5e17347af20edbd)",
+ "cranelift-wasm 0.40.0 (git+https://github.com/CraneStation/Cranelift?rev=fc88520b88bcaad4e4a92f28a5e17347af20edbd)",
+ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+@@ -241,21 +241,20 @@
+
+ [[package]]
+ name = "bindgen"
+-version = "0.51.0"
++version = "0.51.1"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+ dependencies = [
+ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cexpr 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cfg-if 0.1.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "clang-sys 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)",
+- "fxhash 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
+- "proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)",
+- "quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)",
++ "proc-macro2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
++ "quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "regex 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
++ "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
+- "which 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)",
+ ]
+
+ [[package]]
+@@ -559,7 +558,7 @@
+ name = "coreaudio-sys"
+ version = "0.2.2"
+ dependencies = [
+- "bindgen 0.51.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "bindgen 0.51.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ ]
+
+ [[package]]
+@@ -1473,7 +1472,7 @@
+ name = "js"
+ version = "0.1.4"
+ dependencies = [
+- "bindgen 0.51.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "bindgen 0.51.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cmake 0.1.29 (registry+https://github.com/rust-lang/crates.io-index)",
+ "env_logger 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "glob 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)",
+@@ -2253,6 +2252,14 @@
+ ]
+
+ [[package]]
++name = "proc-macro2"
++version = "1.0.4"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
+ name = "procedural-masquerade"
+ version = "0.1.1"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+@@ -2298,6 +2305,14 @@
+ ]
+
+ [[package]]
++name = "quote"
++version = "1.0.2"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "proc-macro2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
+ name = "rand"
+ version = "0.6.5"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+@@ -2556,6 +2571,14 @@
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+
+ [[package]]
++name = "rustc-hash"
++version = "1.0.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
+ name = "rustc_version"
+ version = "0.2.3"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+@@ -2856,7 +2879,7 @@
+ "app_units 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
+ "arrayvec 0.4.11 (registry+https://github.com/rust-lang/crates.io-index)",
+ "atomic_refcell 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
+- "bindgen 0.51.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "bindgen 0.51.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
+ "byteorder 1.3.1 (registry+https://github.com/rust-lang/crates.io-index)",
+ "cssparser 0.25.7 (registry+https://github.com/rust-lang/crates.io-index)",
+@@ -3342,6 +3365,11 @@
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+
+ [[package]]
++name = "unicode-xid"
++version = "0.2.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
+ name = "unreachable"
+ version = "1.0.0"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+@@ -3574,14 +3602,6 @@
+ ]
+
+ [[package]]
+-name = "which"
+-version = "1.0.3"
+-source = "registry+https://github.com/rust-lang/crates.io-index"
+-dependencies = [
+- "libc 0.2.60 (registry+https://github.com/rust-lang/crates.io-index)",
+-]
+-
+-[[package]]
+ name = "winapi"
+ version = "0.2.8"
+ source = "registry+https://github.com/rust-lang/crates.io-index"
+@@ -3763,7 +3783,7 @@
+ "checksum base64 0.10.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0b25d992356d2eb0ed82172f5248873db5560c4721f564b13cb5193bda5e668e"
+ "checksum binary-space-partition 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "88ceb0d16c4fd0e42876e298d7d3ce3780dd9ebdcbe4199816a32c77e08597ff"
+ "checksum bincode 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bda13183df33055cbb84b847becce220d392df502ebe7a4a78d7021771ed94d0"
+-"checksum bindgen 0.51.0 (registry+https://github.com/rust-lang/crates.io-index)" = "18270cdd7065ec045a6bb4bdcd5144d14a78b3aedb3bc5111e688773ac8b9ad0"
++"checksum bindgen 0.51.1 (registry+https://github.com/rust-lang/crates.io-index)" = "ebd71393f1ec0509b553aa012b9b58e81dadbdff7130bd3b8cba576e69b32f75"
+ "checksum binjs_meta 0.5.2 (registry+https://github.com/rust-lang/crates.io-index)" = "6c9a0da2208ceb785c1626fa8b7d250d2e5546ae230294b4a998e4f818c1768e"
+ "checksum bit-vec 0.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "f59bbe95d4e52a6398ec21238d31577f2b28a9d86807f06ca59d191d8440d0bb"
+ "checksum bit_reverse 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "5e97e02db5a2899c0377f3d6031d5da8296ca2b47abef6ed699de51b9e40a28c"
+@@ -3935,10 +3955,12 @@
+ "checksum podio 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "e5422a1ee1bc57cc47ae717b0137314258138f38fd5f3cea083f43a9725383a0"
+ "checksum precomputed-hash 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
+ "checksum proc-macro2 0.4.27 (registry+https://github.com/rust-lang/crates.io-index)" = "4d317f9caece796be1980837fd5cb3dfec5613ebdb04ad0956deea83ce168915"
++"checksum proc-macro2 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "afdc77cc74ec70ed262262942ebb7dac3d479e9e5cfa2da1841c0806f6cdabcc"
+ "checksum procedural-masquerade 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "9f566249236c6ca4340f7ca78968271f0ed2b0f234007a61b66f9ecd0af09260"
+ "checksum quick-error 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "eda5fe9b71976e62bc81b781206aaa076401769b2143379d3eb2118388babac4"
+ "checksum quote 0.3.15 (registry+https://github.com/rust-lang/crates.io-index)" = "7a6e920b65c65f10b2ae65c831a81a073a89edd28c7cce89475bff467ab4167a"
+ "checksum quote 0.6.11 (registry+https://github.com/rust-lang/crates.io-index)" = "cdd8e04bd9c52e0342b406469d494fcb033be4bdbe5c606016defbb1681411e1"
++"checksum quote 1.0.2 (registry+https://github.com/rust-lang/crates.io-index)" = "053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe"
+ "checksum rand 0.3.22 (registry+https://github.com/rust-lang/crates.io-index)" = "15a732abf9d20f0ad8eeb6f909bf6868722d9a06e1e50802b6a70351f40b4eb1"
+ "checksum rand 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8356f47b32624fef5b3301c1be97e5944ecdd595409cc5da11d05f211db6cfbd"
+ "checksum rand 0.6.5 (registry+https://github.com/rust-lang/crates.io-index)" = "6d71dacdc3c88c1fde3885a3be3fbab9f35724e6ce99467f7d9c5026132184ca"
+@@ -3965,6 +3987,7 @@
+ "checksum rust-ini 0.10.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8a654c5bda722c699be6b0fe4c0d90de218928da5b724c3e467fc48865c37263"
+ "checksum rust_cascade 0.3.4 (registry+https://github.com/rust-lang/crates.io-index)" = "f3fe4900d38dab1ad21a515e44687dd0711e6b0ec5b214a3b1aa8857343bcf3a"
+ "checksum rustc-demangle 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "76d7ba1feafada44f2d38eed812bd2489a03c0f5abb975799251518b68848649"
++"checksum rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8"
+ "checksum rustc_version 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "138e3e0acb6c9fb258b19b67cb8abd63c00679d2851805ea151465464fe9030a"
+ "checksum ryu 0.2.4 (registry+https://github.com/rust-lang/crates.io-index)" = "fd0568787116e13c652377b6846f5931454a363a8fdf8ae50463ee40935b278b"
+ "checksum safemem 0.3.0 (registry+https://github.com/rust-lang/crates.io-index)" = "8dca453248a96cb0749e36ccdfe2b0b4e54a61bfef89fb97ec621eb8e0a93dd9"
+@@ -4036,6 +4059,7 @@
+ "checksum unicode-segmentation 1.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "aa6024fc12ddfd1c6dbc14a80fa2324d4568849869b779f6bd37e5e4c03344d1"
+ "checksum unicode-width 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "bf3a113775714a22dcb774d8ea3655c53a32debae63a063acc00a91cc586245f"
+ "checksum unicode-xid 0.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"
++"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
+ "checksum unreachable 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "382810877fe448991dfc7f0dd6e3ae5d58088fd0ea5e35189655f84e6814fa56"
+ "checksum url 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "75b414f6c464c879d7f9babf951f23bc3743fb7313c081b2e6ca719067ea9d61"
+ "checksum urlencoding 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "3df3561629a8bb4c57e5a2e4c43348d9e29c7c29d9b1c4c1f47166deca8f37ed"
+@@ -4050,7 +4074,6 @@
+ "checksum warp 0.1.19 (registry+https://github.com/rust-lang/crates.io-index)" = "33857527c63bc514452f885d0a57019f28139c58fef2b3566016ecc0d44e5d24"
+ "checksum wasmparser 0.37.0 (registry+https://github.com/rust-lang/crates.io-index)" = "82dbea680995dad585289fd47889cf9614133ebfcc3bda95737ef8bdc9e11db6"
+ "checksum weedle 0.8.0 (registry+https://github.com/rust-lang/crates.io-index)" = "26a4c67f132386d965390b8a734d5d10adbcd30eb5cc74bd9229af8b83f10044"
+-"checksum which 1.0.3 (registry+https://github.com/rust-lang/crates.io-index)" = "4be6cfa54dab45266e98b5d7be2f8ce959ddd49abd141a05d52dce4b07f803bb"
+ "checksum winapi 0.2.8 (registry+https://github.com/rust-lang/crates.io-index)" = "167dc9d6949a9b857f3451275e911c3f44255842c1f7a76f33c55103a909087a"
+ "checksum winapi 0.3.6 (git+https://github.com/froydnj/winapi-rs?branch=aarch64)" = "<none>"
+ "checksum winapi-build 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "2d315eee3b34aca4797b2da6b13ed88266e6d612562a0c46390af8299fc699bc"
+diff --git a/js/rust/Cargo.toml b/js/rust/Cargo.toml
+--- a/js/rust/Cargo.toml
++++ b/js/rust/Cargo.toml
+@@ -7,7 +7,7 @@
+
+ [build-dependencies]
+ env_logger = {version = "0.5", default-features = false} # disable `regex` to reduce code size
+-bindgen = {version = "0.51", default-features = false} # disable `logging` to reduce code size
++bindgen = {version = "0.51.1", default-features = false} # disable `logging` to reduce code size
+ cmake = "0.1"
+ glob = "0.2.11"
+
+diff --git a/js/src/wasm/cranelift/Cargo.toml b/js/src/wasm/cranelift/Cargo.toml
+--- a/js/src/wasm/cranelift/Cargo.toml
++++ b/js/src/wasm/cranelift/Cargo.toml
+@@ -20,7 +20,7 @@
+ smallvec = { version = "0.6.6" }
+
+ [build-dependencies]
+-bindgen = {version = "0.51", default-features = false} # disable `logging` to reduce code size
++bindgen = {version = "0.51.1", default-features = false} # disable `logging` to reduce code size
+
+ [features]
+ default = ['cranelift-codegen/std']
+diff --git a/servo/components/style/Cargo.toml b/servo/components/style/Cargo.toml
+--- a/servo/components/style/Cargo.toml
++++ b/servo/components/style/Cargo.toml
+@@ -84,7 +84,7 @@
+ [build-dependencies]
+ lazy_static = "1"
+ log = "0.4"
+-bindgen = {version = "0.51", optional = true, default-features = false}
++bindgen = {version = "0.51.1", optional = true, default-features = false}
+ regex = {version = "1.0", optional = true}
+ walkdir = "2.1.4"
+ toml = {version = "0.4.5", optional = true, default-features = false}
+diff --git a/third_party/rust/bindgen/.cargo-checksum.json b/third_party/rust/bindgen/.cargo-checksum.json
+--- a/third_party/rust/bindgen/.cargo-checksum.json
++++ b/third_party/rust/bindgen/.cargo-checksum.json
+@@ -1 +1 @@
+-{"files":{"Cargo.toml":"a970d1a9e47f029fe3e3ce43ae4292054ad60c4490e035b86b669fcf32015624","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","README.md":"5a1f556c6a57c0a6ccc65e19c27718e0f4b32381a8efcc80f6601b33c58c5d59","build.rs":"a9f6915c54d75f357ce32f96327bf4df53dc81a505b70831978f9dac6f43841d","csmith-fuzzing/README.md":"7107b70fedb0c0a0cadb3c439a49c1bd0119a6d38dc63b1aecc74d1942256ef2","src/callbacks.rs":"b24d7982332c6a35928f134184ddf4072fe4545a45546b97b9b0e0c1fbb77c08","src/clang.rs":"0009b8b6e3f6c33ec2776ec4cb8de2625ae8be22c9f0433c39b06bdd9fc7db4d","src/codegen/bitfield_unit.rs":"87be2d7f7774327a1608c2c4d64df869e17fb34b7efdacb4030b6794d98e40f3","src/codegen/bitfield_unit_tests.rs":"2073ac6a36e0bc9afaef5b1207966817c8fb7a1a9f6368c3b1b8f79822efbfba","src/codegen/error.rs":"2613af1d833377fd4a70719f4a09951d9d45dc9227827b9a2a938a1bcaaea2dd","src/codegen/helpers.rs":"aa0daff2061c6de31acfbd113190889e0f7ca2d2b8d4f46740bfd5832c33e4d2","src/codegen/impl_debug.rs":"f82969461d522fb758eca552ceaf189122a404cbb47fcc16008bfe52fc62aefa","src/codegen/impl_partialeq.rs":"d40d9ee2849c4d3d557b033c4d3af5e6de4a44347f67c0f016198086338811af","src/codegen/mod.rs":"ad116e2af755219bd556e8ee00ca48562f64a161d0b2c94846ae01297446ea0d","src/codegen/struct_layout.rs":"3fa5524aff82365ce292b0cc85080514c85a6dbd31bce90f001773b995dda28e","src/extra_assertions.rs":"494534bd4f18b80d89b180c8a93733e6617edcf7deac413e9a73fd6e7bc9ced7","src/features.rs":"82511f1bb8cbd04d98f57b000903b0264237962af93a72a58220437213eba1ef","src/ir/analysis/derive.rs":"325d4c1c1e6194e743f42a2316f1501b0ef852fe309f2e9cac3434825ad235f0","src/ir/analysis/has_destructor.rs":"63644f479738df35e531d3324ff892614083c3656e0747aa34d9f20dada878ec","src/ir/analysis/has_float.rs":"76162a309e4285a806755a08c687a3e7bc894a100a63da4e88584035e215b11d","src/ir/analysis/has_type_param_in_array.rs":"fdbc0af28a144c88ea2de83e6e6da5e1ffb40e3dd63fd7a708095d085bb06f94","src/ir/analysis/has_vtable.rs":"5788372d27bdbaaf0454bc17be31a
5480918bc41a8a1c4832e8c61185c07f9cd","src/ir/analysis/mod.rs":"1f218e15c19f6666512908abc853fa7ff9ca5d0fafd94f026d9e4b0ce287ec3c","src/ir/analysis/sizedness.rs":"b73865b6689d4f3546edd91909a47c329c4ae630ea97715d29bc683ae1dc17ad","src/ir/analysis/template_params.rs":"6312c008bbc80f50e72a766756c8daddea0b6eeb31ec924b83a231df931e170e","src/ir/annotations.rs":"39a5ab19f4d5dfa617577e4a0d0d2b67b5369d480c7cca4b14d172458c9843f0","src/ir/comment.rs":"c48abe01c5af0f09f583a89f1394bc6c161b40f6c8f0f600bbfe3c907b47969b","src/ir/comp.rs":"78e245835efcd22b5cc878a8a7031171116c708408bdb48b0c9284a067041e56","src/ir/context.rs":"8fd64654343295e0e4a43efe5db6f64315dcd50a5015c3d86e90aae992e2fa9f","src/ir/derive.rs":"34f9aa76b6c9c05136bb69dcd6455397faef571a567254d2c541d50a962994db","src/ir/dot.rs":"95ed2968fc3239d87892e9f1edf1ed6dd18630d949564961765967ea1d16960c","src/ir/enum_ty.rs":"7658cf68c00b1732dfa599c2d6b6a93a82de8401142591c3afe5fcb27d901a66","src/ir/function.rs":"c2feb2e26d47aa96a74af9912ada26be077e2b0c36d46fa10167da7109590500","src/ir/int.rs":"5b8d5bcedb04f39dc2d7e571bc04114b1f1e09cf294afe944c2e7879451c4378","src/ir/item.rs":"3bcdb69b793350e5744aec3577cdbb1e5068ece5220c38763cecd82dfb5e8f03","src/ir/item_kind.rs":"dbeae8c4fd0e5c9485d325aea040e056a1f2cd6d43fc927dee8fe1c0c59a7197","src/ir/layout.rs":"d49582081f5f86f7595afbe4845f38fb3b969a840b568f4a49b265e7d790bb5b","src/ir/mod.rs":"2eae90f207fad2e45957ec9287064992a419e3fc916aba84faff2ea25cbeb5ee","src/ir/module.rs":"c4d90bf38fe3672e01923734ccbdb7951ea929949d5f413a9c2aee12395a5094","src/ir/objc.rs":"758aa955a0c5d6ad82606c88a1f4cd1d93e666b71e82d43b18b1aaae96cf888a","src/ir/template.rs":"c0f8570b927dfd6a421fc4ce3094ec837a3ed936445225dbfac961e8e0842ae5","src/ir/traversal.rs":"ea751379a5aec02f93f8d2c61e18232776b1f000dbeae64b9a7195ba21a19dd6","src/ir/ty.rs":"e6771c8102b9f01b0c4b664bf1151b4773b599634a83895376ce122ca9f74f8b","src/ir/var.rs":"8bdafb6d02f2c55ae11c28d88b19fb7a65ba8466da12ff039ae4c16c790b291e","src/lib.rs":"a07ef7a3d099493555ae5a58b
7b4bf9106a978e38a23b5ff445a83b92727ab62","src/log_stubs.rs":"6dfdd908b7c6453da416cf232893768f9480e551ca4add0858ef88bf71ee6ceb","src/main.rs":"8c96cd2a051e3f09b1b87b75cd9ed77e82e889c8309ebd3e4bc782960cf63e58","src/options.rs":"5b309b225cc51e665bd42ed3e7965a7cd73d984e4455a2d76987fc42ab271ff8","src/parse.rs":"be7d13cc84fae79ec7b3aa9e77063fa475a48d74a854423e2c72d75006a25202","src/regex_set.rs":"5cb72fc3714c0d79e9e942d003349c0775fafd7cd0c9603c65f5261883bbf9cf","src/time.rs":"3b763e6fee51d0eb01228dfe28bc28a9f692aff73b2a7b90a030902e0238fca6"},"package":"18270cdd7065ec045a6bb4bdcd5144d14a78b3aedb3bc5111e688773ac8b9ad0"}
+\ No newline at end of file
++{"files":{"Cargo.lock":"f1b56f3cb914b4ed3214d3ce87d599398b399841718fc938c1b5a309356a44ea","Cargo.toml":"a4656cdd7bd0794e6f10ba78ed3c9a82cd86bfcbec59be7731ee90984de64bde","LICENSE":"c23953d9deb0a3312dbeaf6c128a657f3591acee45067612fa68405eaa4525db","README.md":"5a1f556c6a57c0a6ccc65e19c27718e0f4b32381a8efcc80f6601b33c58c5d59","build.rs":"e1f148e01150af6a66b6af2e5d955c8b9fa092cb4697bae2bcec8a00119055ae","csmith-fuzzing/README.md":"7107b70fedb0c0a0cadb3c439a49c1bd0119a6d38dc63b1aecc74d1942256ef2","src/callbacks.rs":"82e0be9ca02e9a652af934ed546f1cedfc6db0716643123d9a5aab33b360c7d0","src/clang.rs":"66e86bfbbe872cc247cf3bc88a2155e25f587414834023515d184dc13f8f7287","src/codegen/bitfield_unit.rs":"a8fb1a2d97a99685106fcaac87d2013f79d2690d6a46ff05ad1e3629b6075664","src/codegen/bitfield_unit_tests.rs":"dd252134118450800b516e375c872e17b4c1aee63a7b8adbe5b2cd53434bbc7e","src/codegen/error.rs":"ebc9e0f50c6adc9558b76ce774346c02316500a1ebe3cbf56ed00e5e9fe3e456","src/codegen/helpers.rs":"304c9eb56ea6b2c054e1f9fefd5812b0df3a156eee5876f3051fd0b48c7aeac3","src/codegen/impl_debug.rs":"428df604b4be105e3275275e8be81e8306b940abddc1b89675f98a01bf5de8c1","src/codegen/impl_partialeq.rs":"83707f7b13501dc413c904a17163cb54af11e56138f36dfef40ce46c823200fd","src/codegen/mod.rs":"42732503dd25ed4b7924b71862f9100cf281e22f99016540da61a602c78a3650","src/codegen/struct_layout.rs":"482bab6384e65c78346de4f8d8e4d1c3b7df38250788b58bdd1f7b1c7bf70bac","src/extra_assertions.rs":"494534bd4f18b80d89b180c8a93733e6617edcf7deac413e9a73fd6e7bc9ced7","src/features.rs":"2d82f0700c22ea44e010a89c3ae857c3feaf2c85cab3fe4d0277a41a8c2841c4","src/ir/analysis/derive.rs":"2a2322f178760859cdb4b2d45d947ff213c7c684840b4ade46b7ceb34fa6705b","src/ir/analysis/has_destructor.rs":"10380d06ed03d058f10f6f6835d9b8fbebac455a1ea218780430a0ffd8d63472","src/ir/analysis/has_float.rs":"1838ba81eb05a9c3e311687e2247d561cc5093377b15ef8008257025ea56da04","src/ir/analysis/has_type_param_in_array.rs":"dddc5511a705e3a653b5e754e359637031b4862e1a1fc1e1
7f711fb2fbfc1cef","src/ir/analysis/has_vtable.rs":"8da9deec23c4552ecd5b883eaa036e4f2174a5949194c333a62ef463d28dcb6a","src/ir/analysis/mod.rs":"54993cb77df1870bb12cbc6b3a243c2da942cdc967a7d21dacb430601b49b2a1","src/ir/analysis/sizedness.rs":"d0673e19add38a07680ae3a9a5e998a0b2c3917e68efb6639ffe7ea193ada1b1","src/ir/analysis/template_params.rs":"9b662b5ec99cd8409d771a16ee42df500962b0c26f0da85e430ede19cc2b17c9","src/ir/annotations.rs":"268f90fc1d40fadee329c26b183b2aaa9de98f9246fea580404ee0e626315546","src/ir/comment.rs":"31d64a49ae3d9c3c348fa2539e03306ca3a23fae429cab452e42b31ecf632145","src/ir/comp.rs":"73d5d32d70b8e62d33ad4ed6bcbb9b23273c59b5b45570b85a2357c6e1116028","src/ir/context.rs":"c30be52b22fdb489afb34426bcb2e048ae2594846b15324693dd1b71e7dc3369","src/ir/derive.rs":"e5581852eec87918901a129284b4965aefc8a19394187a8095779a084f28fabe","src/ir/dot.rs":"5da8336bf5fd8efabd784a06e0d764eb91566c19ced8ce017a24ae237f0cbe18","src/ir/enum_ty.rs":"c303f3b271d2703c2487e4afaf4b8c9b5bbedb9e1c6a8044de667c21ad8f67fb","src/ir/function.rs":"7a25a55d7f2ded1724894bd1f7ee4766a4bf5f193967bf3a2628ec604b918018","src/ir/int.rs":"68a86182743ec338d58e42203364dc7c8970cb7ec3550433ca92f0c9489b4442","src/ir/item.rs":"203fe53efb0203e0ddc3fb9fcff7b2068f80f252d249a39c137e0cc070663a49","src/ir/item_kind.rs":"7666a1ff1b8260978b790a08b4139ab56b5c65714a5652bbcec7faa7443adc36","src/ir/layout.rs":"936f96fafab34e35b622a5f9e56b0fbd2c97d2e9222470e3687f882f40db1349","src/ir/mod.rs":"713cd537434567003197a123cbae679602c715e976d22f7b23dafd0826ea4c70","src/ir/module.rs":"a26bb0ac90d4cabb0a45d9c1a42b5515c74d4c233dc084e2f85161eac12bff15","src/ir/objc.rs":"ced8242068d5daa2940469693f7277c79368019f8e30ce1e4f55d834bf24c411","src/ir/template.rs":"6c2823c9bab82ab1d70f4d643e8f4d6420be5eafcb78324fb69649e407561cec","src/ir/traversal.rs":"5ac088277f4dfe2918d81b9294aaee41fd83db8e46def66a05f89de078bf4c49","src/ir/ty.rs":"5af2b62d278c679b7c4e597263fce01113e90242e7d263b948d93bc4274dfe9a","src/ir/var.rs":"9226241b188877b6a7bea6523
e14318a8523a6dba57c4f15809c377f87540061","src/lib.rs":"b968f8d0858e3145137a2e33c0913acf19d21f884f914bc513bc18eea1c37bf1","src/log_stubs.rs":"6dfdd908b7c6453da416cf232893768f9480e551ca4add0858ef88bf71ee6ceb","src/main.rs":"6b42a74dfd5c3bde75b7fb984a82f3b3d652abd45aa54b31a40fbda6b02ae674","src/options.rs":"f08facc9d58cb79c7ab93c9d614f13d4d3eca2b5801012da56490a790a8d8c4c","src/parse.rs":"be7d13cc84fae79ec7b3aa9e77063fa475a48d74a854423e2c72d75006a25202","src/regex_set.rs":"5cb72fc3714c0d79e9e942d003349c0775fafd7cd0c9603c65f5261883bbf9cf","src/time.rs":"8efe317e7c6b5ba8e0865ce7b49ca775ee8a02590f4241ef62f647fa3c22b68e"},"package":"ebd71393f1ec0509b553aa012b9b58e81dadbdff7130bd3b8cba576e69b32f75"}
+\ No newline at end of file
+diff --git a/third_party/rust/bindgen/Cargo.lock b/third_party/rust/bindgen/Cargo.lock
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/bindgen/Cargo.lock
+@@ -0,0 +1,413 @@
++# This file is automatically @generated by Cargo.
++# It is not intended for manual editing.
++[[package]]
++name = "aho-corasick"
++version = "0.6.8"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "ansi_term"
++version = "0.11.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "atty"
++version = "0.2.11"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
++ "termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)",
++ "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "bindgen"
++version = "0.51.1"
++dependencies = [
++ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
++ "cexpr 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)",
++ "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
++ "clang-sys 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)",
++ "env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
++ "peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)",
++ "proc-macro2 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "quote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
++ "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
++ "shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
++ "which 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "bitflags"
++version = "1.0.4"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "byteorder"
++version = "1.2.7"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "cc"
++version = "1.0.25"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "cexpr"
++version = "0.3.3"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "nom 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "cfg-if"
++version = "0.1.5"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "clang-sys"
++version = "0.28.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
++ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
++ "libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "clap"
++version = "2.32.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
++ "bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
++ "strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
++ "vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "diff"
++version = "0.1.11"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "env_logger"
++version = "0.6.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)",
++ "humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
++ "log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)",
++ "regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)",
++ "termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "glob"
++version = "0.2.11"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "humantime"
++version = "1.1.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "lazy_static"
++version = "1.1.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "version_check 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "libc"
++version = "0.2.43"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "libloading"
++version = "0.5.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)",
++ "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "log"
++version = "0.4.5"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "memchr"
++version = "2.1.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
++ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
++ "version_check 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "nom"
++version = "4.0.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "peeking_take_while"
++version = "0.1.2"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "proc-macro2"
++version = "1.0.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "quick-error"
++version = "1.2.2"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "quote"
++version = "1.0.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "proc-macro2 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "redox_syscall"
++version = "0.1.40"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "redox_termios"
++version = "0.1.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "regex"
++version = "1.0.5"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)",
++ "memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
++ "thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)",
++ "utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "regex-syntax"
++version = "0.6.2"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "rustc-hash"
++version = "1.0.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "shlex"
++version = "0.1.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "strsim"
++version = "0.7.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "termcolor"
++version = "1.0.4"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "termion"
++version = "1.5.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
++ "redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)",
++ "redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "textwrap"
++version = "0.10.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "thread_local"
++version = "0.3.6"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "ucd-util"
++version = "0.1.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "unicode-width"
++version = "0.1.5"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "unicode-xid"
++version = "0.2.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "utf8-ranges"
++version = "1.0.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "vec_map"
++version = "0.8.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "version_check"
++version = "0.1.4"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "which"
++version = "3.0.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "winapi"
++version = "0.3.5"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
++ "winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "winapi-i686-pc-windows-gnu"
++version = "0.4.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "winapi-util"
++version = "0.1.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[[package]]
++name = "winapi-x86_64-pc-windows-gnu"
++version = "0.4.0"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++
++[[package]]
++name = "wincolor"
++version = "1.0.1"
++source = "registry+https://github.com/rust-lang/crates.io-index"
++dependencies = [
++ "winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)",
++ "winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)",
++]
++
++[metadata]
++"checksum aho-corasick 0.6.8 (registry+https://github.com/rust-lang/crates.io-index)" = "68f56c7353e5a9547cbd76ed90f7bb5ffc3ba09d4ea9bd1d8c06c8b1142eeb5a"
++"checksum ansi_term 0.11.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ee49baf6cb617b853aa8d93bf420db2383fab46d314482ca2803b40d5fde979b"
++"checksum atty 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "9a7d5b8723950951411ee34d271d99dddcc2035a16ab25310ea2c8cfd4369652"
++"checksum bitflags 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "228047a76f468627ca71776ecdebd732a3423081fcf5125585bcd7c49886ce12"
++"checksum byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)" = "94f88df23a25417badc922ab0f5716cc1330e87f71ddd9203b3a3ccd9cedf75d"
++"checksum cc 1.0.25 (registry+https://github.com/rust-lang/crates.io-index)" = "f159dfd43363c4d08055a07703eb7a3406b0dac4d0584d96965a3262db3c9d16"
++"checksum cexpr 0.3.3 (registry+https://github.com/rust-lang/crates.io-index)" = "8fc0086be9ca82f7fc89fc873435531cb898b86e850005850de1f820e2db6e9b"
++"checksum cfg-if 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "0c4e7bb64a8ebb0d856483e1e682ea3422f883c5f5615a90d51a2c82fe87fdd3"
++"checksum clang-sys 0.28.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4227269cec09f5f83ff160be12a1e9b0262dd1aa305302d5ba296c2ebd291055"
++"checksum clap 2.32.0 (registry+https://github.com/rust-lang/crates.io-index)" = "b957d88f4b6a63b9d70d5f454ac8011819c6efa7727858f458ab71c756ce2d3e"
++"checksum diff 0.1.11 (registry+https://github.com/rust-lang/crates.io-index)" = "3c2b69f912779fbb121ceb775d74d51e915af17aaebc38d28a592843a2dd0a3a"
++"checksum env_logger 0.6.0 (registry+https://github.com/rust-lang/crates.io-index)" = "afb070faf94c85d17d50ca44f6ad076bce18ae92f0037d350947240a36e9d42e"
++"checksum glob 0.2.11 (registry+https://github.com/rust-lang/crates.io-index)" = "8be18de09a56b60ed0edf84bc9df007e30040691af7acd1c41874faac5895bfb"
++"checksum humantime 1.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0484fda3e7007f2a4a0d9c3a703ca38c71c54c55602ce4660c419fd32e188c9e"
++"checksum lazy_static 1.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ca488b89a5657b0a2ecd45b95609b3e848cf1755da332a0da46e2b2b1cb371a7"
++"checksum libc 0.2.43 (registry+https://github.com/rust-lang/crates.io-index)" = "76e3a3ef172f1a0b9a9ff0dd1491ae5e6c948b94479a3021819ba7d860c8645d"
++"checksum libloading 0.5.0 (registry+https://github.com/rust-lang/crates.io-index)" = "9c3ad660d7cb8c5822cd83d10897b0f1f1526792737a179e73896152f85b88c2"
++"checksum log 0.4.5 (registry+https://github.com/rust-lang/crates.io-index)" = "d4fcce5fa49cc693c312001daf1d13411c4a5283796bac1084299ea3e567113f"
++"checksum memchr 2.1.0 (registry+https://github.com/rust-lang/crates.io-index)" = "4b3629fe9fdbff6daa6c33b90f7c08355c1aca05a3d01fa8063b822fcf185f3b"
++"checksum nom 4.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "898696750eb5c3ce5eb5afbfbe46e7f7c4e1936e19d3e97be4b7937da7b6d114"
++"checksum peeking_take_while 0.1.2 (registry+https://github.com/rust-lang/crates.io-index)" = "19b17cddbe7ec3f8bc800887bab5e717348c95ea2ca0b1bf0837fb964dc67099"
++"checksum proc-macro2 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "19f287c234c9b2d0308d692dee5c449c1a171167a6f8150f7cf2a49d8fd96967"
++"checksum quick-error 1.2.2 (registry+https://github.com/rust-lang/crates.io-index)" = "9274b940887ce9addde99c4eee6b5c44cc494b182b97e73dc8ffdcb3397fd3f0"
++"checksum quote 1.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "7ab938ebe6f1c82426b5fb82eaf10c3e3028c53deaa3fbe38f5904b37cf4d767"
++"checksum redox_syscall 0.1.40 (registry+https://github.com/rust-lang/crates.io-index)" = "c214e91d3ecf43e9a4e41e578973adeb14b474f2bee858742d127af75a0112b1"
++"checksum redox_termios 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7e891cfe48e9100a70a3b6eb652fef28920c117d366339687bd5576160db0f76"
++"checksum regex 1.0.5 (registry+https://github.com/rust-lang/crates.io-index)" = "2069749032ea3ec200ca51e4a31df41759190a88edca0d2d86ee8bedf7073341"
++"checksum regex-syntax 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "747ba3b235651f6e2f67dfa8bcdcd073ddb7c243cb21c442fc12395dfcac212d"
++"checksum rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8"
++"checksum shlex 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "7fdf1b9db47230893d76faad238fd6097fd6d6a9245cd7a4d90dbd639536bbd2"
++"checksum strsim 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)" = "bb4f380125926a99e52bc279241539c018323fab05ad6368b56f93d9369ff550"
++"checksum termcolor 1.0.4 (registry+https://github.com/rust-lang/crates.io-index)" = "4096add70612622289f2fdcdbd5086dc81c1e2675e6ae58d6c4f62a16c6d7f2f"
++"checksum termion 1.5.1 (registry+https://github.com/rust-lang/crates.io-index)" = "689a3bdfaab439fd92bc87df5c4c78417d3cbe537487274e9b0b2dce76e92096"
++"checksum textwrap 0.10.0 (registry+https://github.com/rust-lang/crates.io-index)" = "307686869c93e71f94da64286f9a9524c0f308a9e1c87a583de8e9c9039ad3f6"
++"checksum thread_local 0.3.6 (registry+https://github.com/rust-lang/crates.io-index)" = "c6b53e329000edc2b34dbe8545fd20e55a333362d0a321909685a19bd28c3f1b"
++"checksum ucd-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd2be2d6639d0f8fe6cdda291ad456e23629558d466e2789d2c3e9892bda285d"
++"checksum unicode-width 0.1.5 (registry+https://github.com/rust-lang/crates.io-index)" = "882386231c45df4700b275c7ff55b6f3698780a650026380e72dabe76fa46526"
++"checksum unicode-xid 0.2.0 (registry+https://github.com/rust-lang/crates.io-index)" = "826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"
++"checksum utf8-ranges 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "fd70f467df6810094968e2fce0ee1bd0e87157aceb026a8c083bcf5e25b9efe4"
++"checksum vec_map 0.8.1 (registry+https://github.com/rust-lang/crates.io-index)" = "05c78687fb1a80548ae3250346c3db86a80a7cdd77bda190189f2d0a0987c81a"
++"checksum version_check 0.1.4 (registry+https://github.com/rust-lang/crates.io-index)" = "7716c242968ee87e5542f8021178248f267f295a5c4803beae8b8b7fd9bc6051"
++"checksum which 3.0.0 (registry+https://github.com/rust-lang/crates.io-index)" = "240a31163872f7e8e49f35b42b58485e35355b07eb009d9f3686733541339a69"
++"checksum winapi 0.3.5 (registry+https://github.com/rust-lang/crates.io-index)" = "773ef9dcc5f24b7d850d0ff101e542ff24c3b090a9768e03ff889fdef41f00fd"
++"checksum winapi-i686-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "ac3b87c63620426dd9b991e5ce0329eff545bccbbb34f3be09ff6fb6ab51b7b6"
++"checksum winapi-util 0.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "afc5508759c5bf4285e61feb862b6083c8480aec864fa17a81fdec6f69b461ab"
++"checksum winapi-x86_64-pc-windows-gnu 0.4.0 (registry+https://github.com/rust-lang/crates.io-index)" = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
++"checksum wincolor 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)" = "561ed901ae465d6185fa7864d63fbd5720d0ef718366c9a4dc83cf6170d7e9ba"
+diff --git a/third_party/rust/bindgen/Cargo.toml b/third_party/rust/bindgen/Cargo.toml
+--- a/third_party/rust/bindgen/Cargo.toml
++++ b/third_party/rust/bindgen/Cargo.toml
+@@ -12,7 +12,7 @@
+
+ [package]
+ name = "bindgen"
+-version = "0.51.0"
++version = "0.51.1"
+ authors = ["Jyun-Yan You <jyyou.tw@gmail.com>", "Emilio Cobos Ãlvarez <emilio@crisal.io>", "Nick Fitzgerald <fitzgen@gmail.com>", "The Servo project developers"]
+ build = "build.rs"
+ include = ["LICENSE", "README.md", "Cargo.toml", "build.rs", "src/*.rs", "src/**/*.rs"]
+@@ -54,9 +54,6 @@
+ version = "0.6"
+ optional = true
+
+-[dependencies.fxhash]
+-version = "0.2"
+-
+ [dependencies.lazy_static]
+ version = "1"
+
+@@ -68,21 +65,26 @@
+ version = "0.1.2"
+
+ [dependencies.proc-macro2]
+-version = "0.4"
++version = "1"
+ default-features = false
+
+ [dependencies.quote]
+-version = "0.6"
++version = "1"
+ default-features = false
+
+ [dependencies.regex]
+ version = "1.0"
+
++[dependencies.rustc-hash]
++version = "1.0.1"
++
+ [dependencies.shlex]
+ version = "0.1"
+
+ [dependencies.which]
+-version = ">=1.0, <3.0"
++version = "3.0"
++optional = true
++default-features = false
+ [dev-dependencies.clap]
+ version = "2"
+
+@@ -93,7 +95,7 @@
+ version = "0.1"
+
+ [features]
+-default = ["logging", "clap"]
++default = ["logging", "clap", "which-rustfmt"]
+ logging = ["env_logger", "log"]
+ static = []
+ testing_only_docs = []
+@@ -102,5 +104,6 @@
+ testing_only_libclang_3_9 = []
+ testing_only_libclang_4 = []
+ testing_only_libclang_5 = []
++which-rustfmt = ["which"]
+ [badges.travis-ci]
+ repository = "rust-lang/rust-bindgen"
+diff --git a/third_party/rust/bindgen/build.rs b/third_party/rust/bindgen/build.rs
+--- a/third_party/rust/bindgen/build.rs
++++ b/third_party/rust/bindgen/build.rs
+@@ -7,8 +7,8 @@
+ pub fn main() {
+ let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
+
+- let mut dst = File::create(Path::new(&out_dir).join("host-target.txt"))
+- .unwrap();
++ let mut dst =
++ File::create(Path::new(&out_dir).join("host-target.txt")).unwrap();
+ dst.write_all(env::var("TARGET").unwrap().as_bytes())
+ .unwrap();
+ }
+@@ -24,8 +24,8 @@
+
+ pub fn main() {
+ let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap());
+- let mut dst = File::create(Path::new(&out_dir).join("tests.rs"))
+- .unwrap();
++ let mut dst =
++ File::create(Path::new(&out_dir).join("tests.rs")).unwrap();
+
+ let manifest_dir =
+ PathBuf::from(env::var("CARGO_MANIFEST_DIR").unwrap());
+@@ -57,7 +57,8 @@
+ "test_header!(header_{}, {:?});",
+ func,
+ entry.path(),
+- ).unwrap();
++ )
++ .unwrap();
+ }
+ _ => {}
+ }
+diff --git a/third_party/rust/bindgen/src/callbacks.rs b/third_party/rust/bindgen/src/callbacks.rs
+--- a/third_party/rust/bindgen/src/callbacks.rs
++++ b/third_party/rust/bindgen/src/callbacks.rs
+@@ -37,8 +37,7 @@
+
+ /// This will be run on every string macro. The callback can not influence the further
+ /// treatment of the macro, but may use the value to generate additional code or configuration.
+- fn str_macro(&self, _name: &str, _value: &[u8]) {
+- }
++ fn str_macro(&self, _name: &str, _value: &[u8]) {}
+
+ /// This function should return whether, given an enum variant
+ /// name, and value, this enum variant will forcibly be a constant.
+diff --git a/third_party/rust/bindgen/src/clang.rs b/third_party/rust/bindgen/src/clang.rs
+--- a/third_party/rust/bindgen/src/clang.rs
++++ b/third_party/rust/bindgen/src/clang.rs
+@@ -6,14 +6,14 @@
+
+ use cexpr;
+ use clang_sys::*;
++use ir::context::BindgenContext;
+ use regex;
+-use ir::context::BindgenContext;
+-use std::{mem, ptr, slice};
+ use std::ffi::{CStr, CString};
+ use std::fmt;
+ use std::hash::Hash;
+ use std::hash::Hasher;
+-use std::os::raw::{c_char, c_int, c_uint, c_ulong, c_longlong, c_ulonglong};
++use std::os::raw::{c_char, c_int, c_longlong, c_uint, c_ulong, c_ulonglong};
++use std::{mem, ptr, slice};
+
+ /// A cursor into the Clang AST, pointing to an AST node.
+ ///
+@@ -43,7 +43,11 @@
+ /// The USR can be used to compare entities across translation units.
+ pub fn usr(&self) -> Option<String> {
+ let s = unsafe { cxstring_into_string(clang_getCursorUSR(self.x)) };
+- if s.is_empty() { None } else { Some(s) }
++ if s.is_empty() {
++ None
++ } else {
++ Some(s)
++ }
+ }
+
+ /// Is this cursor's referent a declaration?
+@@ -210,8 +214,9 @@
+
+ while semantic_parent.is_some() &&
+ (semantic_parent.unwrap().kind() == CXCursor_Namespace ||
+- semantic_parent.unwrap().kind() == CXCursor_NamespaceAlias ||
+- semantic_parent.unwrap().kind() == CXCursor_NamespaceRef)
++ semantic_parent.unwrap().kind() ==
++ CXCursor_NamespaceAlias ||
++ semantic_parent.unwrap().kind() == CXCursor_NamespaceRef)
+ {
+ semantic_parent =
+ semantic_parent.unwrap().fallible_semantic_parent();
+@@ -300,7 +305,11 @@
+ let s = unsafe {
+ cxstring_into_string(clang_Cursor_getRawCommentText(self.x))
+ };
+- if s.is_empty() { None } else { Some(s) }
++ if s.is_empty() {
++ None
++ } else {
++ Some(s)
++ }
+ }
+
+ /// Get the referent's parsed comment.
+@@ -346,7 +355,11 @@
+ x: clang_getCursorReferenced(self.x),
+ };
+
+- if ret.is_valid() { Some(ret) } else { None }
++ if ret.is_valid() {
++ Some(ret)
++ } else {
++ None
++ }
+ }
+ }
+
+@@ -371,7 +384,11 @@
+ let ret = Cursor {
+ x: clang_getSpecializedCursorTemplate(self.x),
+ };
+- if ret.is_valid() { Some(ret) } else { None }
++ if ret.is_valid() {
++ Some(ret)
++ } else {
++ None
++ }
+ }
+ }
+
+@@ -438,11 +455,13 @@
+ pub fn contains_cursor(&self, kind: CXCursorKind) -> bool {
+ let mut found = false;
+
+- self.visit(|c| if c.kind() == kind {
+- found = true;
+- CXChildVisit_Break
+- } else {
+- CXChildVisit_Continue
++ self.visit(|c| {
++ if c.kind() == kind {
++ found = true;
++ CXChildVisit_Break
++ } else {
++ CXChildVisit_Continue
++ }
+ });
+
+ found
+@@ -459,7 +478,11 @@
+ pub fn bit_width(&self) -> Option<u32> {
+ unsafe {
+ let w = clang_getFieldDeclBitWidth(self.x);
+- if w == -1 { None } else { Some(w as u32) }
++ if w == -1 {
++ None
++ } else {
++ Some(w as u32)
++ }
+ }
+ }
+
+@@ -470,7 +493,11 @@
+ let t = Type {
+ x: clang_getEnumDeclIntegerType(self.x),
+ };
+- if t.is_valid() { Some(t) } else { None }
++ if t.is_valid() {
++ Some(t)
++ } else {
++ None
++ }
+ }
+ }
+
+@@ -509,7 +536,8 @@
+ self.visit(|cur| {
+ if cur.kind() == CXCursor_UnexposedAttr {
+ found_attr = cur.tokens().iter().any(|t| {
+- t.kind == CXToken_Identifier && t.spelling() == attr.as_bytes()
++ t.kind == CXToken_Identifier &&
++ t.spelling() == attr.as_bytes()
+ });
+
+ if found_attr {
+@@ -530,7 +558,11 @@
+ x: unsafe { clang_getTypedefDeclUnderlyingType(self.x) },
+ };
+
+- if inner.is_valid() { Some(inner) } else { None }
++ if inner.is_valid() {
++ Some(inner)
++ } else {
++ None
++ }
+ }
+
+ /// Get the linkage kind for this cursor's referent.
+@@ -559,12 +591,11 @@
+ // CXCursor_FunctionDecl |
+ // CXCursor_CXXMethod => {
+ self.num_args().ok().map(|num| {
+- (0..num).map(|i| {
+- Cursor {
++ (0..num)
++ .map(|i| Cursor {
+ x: unsafe { clang_Cursor_getArgument(self.x, i as c_uint) },
+- }
+- })
+- .collect()
++ })
++ .collect()
+ })
+ }
+
+@@ -576,7 +607,11 @@
+ pub fn num_args(&self) -> Result<u32, ()> {
+ unsafe {
+ let w = clang_Cursor_getNumArguments(self.x);
+- if w == -1 { Err(()) } else { Ok(w as u32) }
++ if w == -1 {
++ Err(())
++ } else {
++ Ok(w as u32)
++ }
+ }
+ }
+
+@@ -642,7 +677,11 @@
+ let rt = Type {
+ x: unsafe { clang_getCursorResultType(self.x) },
+ };
+- if rt.is_valid() { Some(rt) } else { None }
++ if rt.is_valid() {
++ Some(rt)
++ } else {
++ None
++ }
+ }
+
+ /// Gets the tokens that correspond to that cursor.
+@@ -654,26 +693,29 @@
+ pub fn cexpr_tokens(self) -> Vec<cexpr::token::Token> {
+ use cexpr::token;
+
+- self.tokens().iter().filter_map(|token| {
+- let kind = match token.kind {
+- CXToken_Punctuation => token::Kind::Punctuation,
+- CXToken_Literal => token::Kind::Literal,
+- CXToken_Identifier => token::Kind::Identifier,
+- CXToken_Keyword => token::Kind::Keyword,
+- // NB: cexpr is not too happy about comments inside
+- // expressions, so we strip them down here.
+- CXToken_Comment => return None,
+- _ => {
+- error!("Found unexpected token kind: {:?}", token);
+- return None;
+- }
+- };
++ self.tokens()
++ .iter()
++ .filter_map(|token| {
++ let kind = match token.kind {
++ CXToken_Punctuation => token::Kind::Punctuation,
++ CXToken_Literal => token::Kind::Literal,
++ CXToken_Identifier => token::Kind::Identifier,
++ CXToken_Keyword => token::Kind::Keyword,
++ // NB: cexpr is not too happy about comments inside
++ // expressions, so we strip them down here.
++ CXToken_Comment => return None,
++ _ => {
++ error!("Found unexpected token kind: {:?}", token);
++ return None;
++ }
++ };
+
+- Some(token::Token {
+- kind,
+- raw: token.spelling().to_vec().into_boxed_slice(),
++ Some(token::Token {
++ kind,
++ raw: token.spelling().to_vec().into_boxed_slice(),
++ })
+ })
+- }).collect()
++ .collect()
+ }
+ }
+
+@@ -690,11 +732,14 @@
+ let mut tokens = ptr::null_mut();
+ let mut token_count = 0;
+ let range = cursor.extent();
+- let tu = unsafe {
+- clang_Cursor_getTranslationUnit(cursor.x)
+- };
++ let tu = unsafe { clang_Cursor_getTranslationUnit(cursor.x) };
+ unsafe { clang_tokenize(tu, range, &mut tokens, &mut token_count) };
+- Self { cursor, tu, tokens, token_count }
++ Self {
++ cursor,
++ tu,
++ tokens,
++ token_count,
++ }
+ }
+
+ fn as_slice(&self) -> &[CXToken] {
+@@ -717,7 +762,11 @@
+ fn drop(&mut self) {
+ if !self.tokens.is_null() {
+ unsafe {
+- clang_disposeTokens(self.tu, self.tokens, self.token_count as c_uint);
++ clang_disposeTokens(
++ self.tu,
++ self.tokens,
++ self.token_count as c_uint,
++ );
+ }
+ }
+ }
+@@ -790,9 +839,7 @@
+ Visitor: FnMut(Cursor) -> CXChildVisitResult,
+ {
+ let func: &mut Visitor = unsafe { mem::transmute(data) };
+- let child = Cursor {
+- x: cur,
+- };
++ let child = Cursor { x: cur };
+
+ (*func)(child)
+ }
+@@ -942,8 +989,9 @@
+ fn clang_size_of(&self, ctx: &BindgenContext) -> c_longlong {
+ match self.kind() {
+ // Work-around https://bugs.llvm.org/show_bug.cgi?id=40975
+- CXType_RValueReference |
+- CXType_LValueReference => ctx.target_pointer_size() as c_longlong,
++ CXType_RValueReference | CXType_LValueReference => {
++ ctx.target_pointer_size() as c_longlong
++ }
+ // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813
+ CXType_Auto if self.is_non_deductible_auto_type() => return -6,
+ _ => unsafe { clang_Type_getSizeOf(self.x) },
+@@ -954,8 +1002,9 @@
+ fn clang_align_of(&self, ctx: &BindgenContext) -> c_longlong {
+ match self.kind() {
+ // Work-around https://bugs.llvm.org/show_bug.cgi?id=40975
+- CXType_RValueReference |
+- CXType_LValueReference => ctx.target_pointer_size() as c_longlong,
++ CXType_RValueReference | CXType_LValueReference => {
++ ctx.target_pointer_size() as c_longlong
++ }
+ // Work-around https://bugs.llvm.org/show_bug.cgi?id=40813
+ CXType_Auto if self.is_non_deductible_auto_type() => return -6,
+ _ => unsafe { clang_Type_getAlignOf(self.x) },
+@@ -966,11 +1015,18 @@
+ /// for them.
+ pub fn size(&self, ctx: &BindgenContext) -> usize {
+ let val = self.clang_size_of(ctx);
+- if val < 0 { 0 } else { val as usize }
++ if val < 0 {
++ 0
++ } else {
++ val as usize
++ }
+ }
+
+ /// What is the size of this type?
+- pub fn fallible_size(&self, ctx: &BindgenContext) -> Result<usize, LayoutError> {
++ pub fn fallible_size(
++ &self,
++ ctx: &BindgenContext,
++ ) -> Result<usize, LayoutError> {
+ let val = self.clang_size_of(ctx);
+ if val < 0 {
+ Err(LayoutError::from(val as i32))
+@@ -983,11 +1039,18 @@
+ /// returning `0`.
+ pub fn align(&self, ctx: &BindgenContext) -> usize {
+ let val = self.clang_align_of(ctx);
+- if val < 0 { 0 } else { val as usize }
++ if val < 0 {
++ 0
++ } else {
++ val as usize
++ }
+ }
+
+ /// What is the alignment of this type?
+- pub fn fallible_align(&self, ctx: &BindgenContext) -> Result<usize, LayoutError> {
++ pub fn fallible_align(
++ &self,
++ ctx: &BindgenContext,
++ ) -> Result<usize, LayoutError> {
+ let val = self.clang_align_of(ctx);
+ if val < 0 {
+ Err(LayoutError::from(val as i32))
+@@ -998,7 +1061,10 @@
+
+ /// Get the layout for this type, or an error describing why it does not
+ /// have a valid layout.
+- pub fn fallible_layout(&self, ctx: &BindgenContext) -> Result<::ir::layout::Layout, LayoutError> {
++ pub fn fallible_layout(
++ &self,
++ ctx: &BindgenContext,
++ ) -> Result<::ir::layout::Layout, LayoutError> {
+ use ir::layout::Layout;
+ let size = self.fallible_size(ctx)?;
+ let align = self.fallible_align(ctx)?;
+@@ -1012,7 +1078,7 @@
+ // question correctly. However, that's no reason to panic when
+ // generating bindings for simple C headers with an old libclang.
+ if !clang_Type_getNumTemplateArguments::is_loaded() {
+- return None
++ return None;
+ }
+
+ let n = unsafe { clang_Type_getNumTemplateArguments(self.x) };
+@@ -1027,12 +1093,10 @@
+ /// If this type is a class template specialization, return its
+ /// template arguments. Otherwise, return None.
+ pub fn template_args(&self) -> Option<TypeTemplateArgIterator> {
+- self.num_template_args().map(|n| {
+- TypeTemplateArgIterator {
+- x: self.x,
+- length: n,
+- index: 0,
+- }
++ self.num_template_args().map(|n| TypeTemplateArgIterator {
++ x: self.x,
++ length: n,
++ index: 0,
+ })
+ }
+
+@@ -1041,12 +1105,11 @@
+ /// Returns None if the type is not a function prototype.
+ pub fn args(&self) -> Option<Vec<Type>> {
+ self.num_args().ok().map(|num| {
+- (0..num).map(|i| {
+- Type {
++ (0..num)
++ .map(|i| Type {
+ x: unsafe { clang_getArgType(self.x, i as c_uint) },
+- }
+- })
+- .collect()
++ })
++ .collect()
+ })
+ }
+
+@@ -1056,11 +1119,14 @@
+ pub fn num_args(&self) -> Result<u32, ()> {
+ unsafe {
+ let w = clang_getNumArgTypes(self.x);
+- if w == -1 { Err(()) } else { Ok(w as u32) }
++ if w == -1 {
++ Err(())
++ } else {
++ Ok(w as u32)
++ }
+ }
+ }
+
+-
+ /// Given that this type is a pointer type, return the type that it points
+ /// to.
+ pub fn pointee_type(&self) -> Option<Type> {
+@@ -1126,7 +1192,11 @@
+ let rt = Type {
+ x: unsafe { clang_getResultType(self.x) },
+ };
+- if rt.is_valid() { Some(rt) } else { None }
++ if rt.is_valid() {
++ Some(rt)
++ } else {
++ None
++ }
+ }
+
+ /// Given that this type is a function type, get its calling convention. If
+@@ -1186,15 +1256,19 @@
+ // This is terrible :(
+ fn hacky_parse_associated_type<S: AsRef<str>>(spelling: S) -> bool {
+ lazy_static! {
+- static ref ASSOC_TYPE_RE: regex::Regex =
+- regex::Regex::new(r"typename type\-parameter\-\d+\-\d+::.+").unwrap();
++ static ref ASSOC_TYPE_RE: regex::Regex = regex::Regex::new(
++ r"typename type\-parameter\-\d+\-\d+::.+"
++ )
++ .unwrap();
+ }
+ ASSOC_TYPE_RE.is_match(spelling.as_ref())
+ }
+
+ self.kind() == CXType_Unexposed &&
+ (hacky_parse_associated_type(self.spelling()) ||
+- hacky_parse_associated_type(self.canonical_type().spelling()))
++ hacky_parse_associated_type(
++ self.canonical_type().spelling(),
++ ))
+ }
+ }
+
+@@ -1263,20 +1337,9 @@
+ let mut col = 0;
+ let mut off = 0;
+ clang_getSpellingLocation(
+- self.x,
+- &mut file,
+- &mut line,
+- &mut col,
+- &mut off,
++ self.x, &mut file, &mut line, &mut col, &mut off,
+ );
+- (
+- File {
+- x: file,
+- },
+- line as usize,
+- col as usize,
+- off as usize,
+- )
++ (File { x: file }, line as usize, col as usize, off as usize)
+ }
+ }
+ }
+@@ -1375,14 +1438,14 @@
+ self.index += 1;
+ Some(CommentAttribute {
+ name: unsafe {
+- cxstring_into_string(
+- clang_HTMLStartTag_getAttrName(self.x, idx),
+- )
++ cxstring_into_string(clang_HTMLStartTag_getAttrName(
++ self.x, idx,
++ ))
+ },
+ value: unsafe {
+- cxstring_into_string(
+- clang_HTMLStartTag_getAttrValue(self.x, idx),
+- )
++ cxstring_into_string(clang_HTMLStartTag_getAttrValue(
++ self.x, idx,
++ ))
+ },
+ })
+ } else {
+@@ -1508,9 +1571,7 @@
+ if tu.is_null() {
+ None
+ } else {
+- Some(TranslationUnit {
+- x: tu,
+- })
++ Some(TranslationUnit { x: tu })
+ }
+ }
+
+@@ -1552,7 +1613,6 @@
+ }
+ }
+
+-
+ /// A diagnostic message generated while parsing a translation unit.
+ pub struct Diagnostic {
+ x: CXDiagnostic,
+@@ -1615,8 +1675,7 @@
+ write!(
+ fmt,
+ "UnsavedFile(name: {:?}, contents: {:?})",
+- self.name,
+- self.contents
++ self.name, self.contents
+ )
+ }
+ }
+@@ -1672,7 +1731,11 @@
+ if templ_kind != CXCursor_NoDeclFound {
+ print_indent(
+ depth,
+- format!(" {}template-kind = {}", prefix, kind_to_str(templ_kind)),
++ format!(
++ " {}template-kind = {}",
++ prefix,
++ kind_to_str(templ_kind)
++ ),
+ );
+ }
+ if let Some(usr) = c.usr() {
+@@ -1769,18 +1832,18 @@
+ depth,
+ format!(" {}spelling = \"{}\"", prefix, ty.spelling()),
+ );
+- let num_template_args = if clang_Type_getNumTemplateArguments::is_loaded() {
+- unsafe { clang_Type_getNumTemplateArguments(ty.x) }
+- } else {
+- -1
+- };
++ let num_template_args =
++ if clang_Type_getNumTemplateArguments::is_loaded() {
++ unsafe { clang_Type_getNumTemplateArguments(ty.x) }
++ } else {
++ -1
++ };
+ if num_template_args >= 0 {
+ print_indent(
+ depth,
+ format!(
+ " {}number-of-template-args = {}",
+- prefix,
+- num_template_args
++ prefix, num_template_args
+ ),
+ );
+ }
+@@ -1882,7 +1945,8 @@
+ let mut found_cant_eval = false;
+ cursor.visit(|c| {
+ if c.kind() == CXCursor_TypeRef &&
+- c.cur_type().canonical_type().kind() == CXType_Unexposed {
++ c.cur_type().canonical_type().kind() == CXType_Unexposed
++ {
+ found_cant_eval = true;
+ return CXChildVisit_Break;
+ }
+@@ -1922,7 +1986,7 @@
+ if !clang_EvalResult_isUnsignedInt::is_loaded() {
+ // FIXME(emilio): There's no way to detect underflow here, and clang
+ // will just happily give us a value.
+- return Some(unsafe { clang_EvalResult_getAsInt(self.x) } as i64)
++ return Some(unsafe { clang_EvalResult_getAsInt(self.x) } as i64);
+ }
+
+ if unsafe { clang_EvalResult_isUnsignedInt(self.x) } != 0 {
+@@ -1931,7 +1995,7 @@
+ return None;
+ }
+
+- return Some(value as i64)
++ return Some(value as i64);
+ }
+
+ let value = unsafe { clang_EvalResult_getAsLongLong(self.x) };
+diff --git a/third_party/rust/bindgen/src/codegen/bitfield_unit.rs b/third_party/rust/bindgen/src/codegen/bitfield_unit.rs
+--- a/third_party/rust/bindgen/src/codegen/bitfield_unit.rs
++++ b/third_party/rust/bindgen/src/codegen/bitfield_unit.rs
+@@ -1,19 +1,14 @@
+ #[repr(C)]
+ #[derive(Copy, Clone, Debug, Default, Eq, Hash, Ord, PartialEq, PartialOrd)]
+-pub struct __BindgenBitfieldUnit<Storage, Align>
+-{
++pub struct __BindgenBitfieldUnit<Storage, Align> {
+ storage: Storage,
+ align: [Align; 0],
+ }
+
+-impl<Storage, Align> __BindgenBitfieldUnit<Storage, Align>
+-{
++impl<Storage, Align> __BindgenBitfieldUnit<Storage, Align> {
+ #[inline]
+ pub const fn new(storage: Storage) -> Self {
+- Self {
+- storage,
+- align: [],
+- }
++ Self { storage, align: [] }
+ }
+ }
+
+@@ -28,12 +23,11 @@
+ let byte_index = index / 8;
+ let byte = self.storage.as_ref()[byte_index];
+
+- let bit_index =
+- if cfg!(target_endian = "big") {
+- 7 - (index % 8)
+- } else {
+- index % 8
+- };
++ let bit_index = if cfg!(target_endian = "big") {
++ 7 - (index % 8)
++ } else {
++ index % 8
++ };
+
+ let mask = 1 << bit_index;
+
+@@ -47,12 +41,11 @@
+ let byte_index = index / 8;
+ let byte = &mut self.storage.as_mut()[byte_index];
+
+- let bit_index =
+- if cfg!(target_endian = "big") {
+- 7 - (index % 8)
+- } else {
+- index % 8
+- };
++ let bit_index = if cfg!(target_endian = "big") {
++ 7 - (index % 8)
++ } else {
++ index % 8
++ };
+
+ let mask = 1 << bit_index;
+ if val {
+@@ -66,18 +59,20 @@
+ pub fn get(&self, bit_offset: usize, bit_width: u8) -> u64 {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+- debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
++ debug_assert!(
++ (bit_offset + (bit_width as usize)) / 8 <=
++ self.storage.as_ref().len()
++ );
+
+ let mut val = 0;
+
+ for i in 0..(bit_width as usize) {
+ if self.get_bit(i + bit_offset) {
+- let index =
+- if cfg!(target_endian = "big") {
+- bit_width as usize - 1 - i
+- } else {
+- i
+- };
++ let index = if cfg!(target_endian = "big") {
++ bit_width as usize - 1 - i
++ } else {
++ i
++ };
+ val |= 1 << index;
+ }
+ }
+@@ -89,17 +84,19 @@
+ pub fn set(&mut self, bit_offset: usize, bit_width: u8, val: u64) {
+ debug_assert!(bit_width <= 64);
+ debug_assert!(bit_offset / 8 < self.storage.as_ref().len());
+- debug_assert!((bit_offset + (bit_width as usize)) / 8 <= self.storage.as_ref().len());
++ debug_assert!(
++ (bit_offset + (bit_width as usize)) / 8 <=
++ self.storage.as_ref().len()
++ );
+
+ for i in 0..(bit_width as usize) {
+ let mask = 1 << i;
+ let val_bit_is_set = val & mask == mask;
+- let index =
+- if cfg!(target_endian = "big") {
+- bit_width as usize - 1 - i
+- } else {
+- i
+- };
++ let index = if cfg!(target_endian = "big") {
++ bit_width as usize - 1 - i
++ } else {
++ i
++ };
+ self.set_bit(index + bit_offset, val_bit_is_set);
+ }
+ }
+diff --git a/third_party/rust/bindgen/src/codegen/bitfield_unit_tests.rs b/third_party/rust/bindgen/src/codegen/bitfield_unit_tests.rs
+--- a/third_party/rust/bindgen/src/codegen/bitfield_unit_tests.rs
++++ b/third_party/rust/bindgen/src/codegen/bitfield_unit_tests.rs
+@@ -26,7 +26,8 @@
+
+ #[test]
+ fn bitfield_unit_get_bit() {
+- let unit = __BindgenBitfieldUnit::<[u8; 2], u64>::new([0b10011101, 0b00011101]);
++ let unit =
++ __BindgenBitfieldUnit::<[u8; 2], u64>::new([0b10011101, 0b00011101]);
+
+ let mut bits = vec![];
+ for i in 0..16 {
+@@ -35,32 +36,21 @@
+
+ println!();
+ println!("bits = {:?}", bits);
+- assert_eq!(bits, &[
+- // 0b10011101
+- true,
+- false,
+- true,
+- true,
+- true,
+- false,
+- false,
+- true ,
+-
+- // 0b00011101
+- true,
+- false,
+- true,
+- true,
+- true,
+- false,
+- false,
+- false
+- ]);
++ assert_eq!(
++ bits,
++ &[
++ // 0b10011101
++ true, false, true, true, true, false, false, true,
++ // 0b00011101
++ true, false, true, true, true, false, false, false
++ ]
++ );
+ }
+
+ #[test]
+ fn bitfield_unit_set_bit() {
+- let mut unit = __BindgenBitfieldUnit::<[u8; 2], u64>::new([0b00000000, 0b00000000]);
++ let mut unit =
++ __BindgenBitfieldUnit::<[u8; 2], u64>::new([0b00000000, 0b00000000]);
+
+ for i in 0..16 {
+ if i % 3 == 0 {
+@@ -72,7 +62,8 @@
+ assert_eq!(unit.get_bit(i), i % 3 == 0);
+ }
+
+- let mut unit = __BindgenBitfieldUnit::<[u8; 2], u64>::new([0b11111111, 0b11111111]);
++ let mut unit =
++ __BindgenBitfieldUnit::<[u8; 2], u64>::new([0b11111111, 0b11111111]);
+
+ for i in 0..16 {
+ if i % 3 == 0 {
+@@ -87,15 +78,39 @@
+
+ #[test]
+ fn bitfield_unit_align() {
+- assert_eq!(mem::align_of::<__BindgenBitfieldUnit<[u8; 1], u8>>(), mem::align_of::<u8>());
+- assert_eq!(mem::align_of::<__BindgenBitfieldUnit<[u8; 1], u16>>(), mem::align_of::<u16>());
+- assert_eq!(mem::align_of::<__BindgenBitfieldUnit<[u8; 1], u32>>(), mem::align_of::<u32>());
+- assert_eq!(mem::align_of::<__BindgenBitfieldUnit<[u8; 1], u64>>(), mem::align_of::<u64>());
++ assert_eq!(
++ mem::align_of::<__BindgenBitfieldUnit<[u8; 1], u8>>(),
++ mem::align_of::<u8>()
++ );
++ assert_eq!(
++ mem::align_of::<__BindgenBitfieldUnit<[u8; 1], u16>>(),
++ mem::align_of::<u16>()
++ );
++ assert_eq!(
++ mem::align_of::<__BindgenBitfieldUnit<[u8; 1], u32>>(),
++ mem::align_of::<u32>()
++ );
++ assert_eq!(
++ mem::align_of::<__BindgenBitfieldUnit<[u8; 1], u64>>(),
++ mem::align_of::<u64>()
++ );
+
+- assert_eq!(mem::align_of::<__BindgenBitfieldUnit<[u8; 8], u8>>(), mem::align_of::<u8>());
+- assert_eq!(mem::align_of::<__BindgenBitfieldUnit<[u8; 8], u16>>(), mem::align_of::<u16>());
+- assert_eq!(mem::align_of::<__BindgenBitfieldUnit<[u8; 8], u32>>(), mem::align_of::<u32>());
+- assert_eq!(mem::align_of::<__BindgenBitfieldUnit<[u8; 8], u64>>(), mem::align_of::<u64>());
++ assert_eq!(
++ mem::align_of::<__BindgenBitfieldUnit<[u8; 8], u8>>(),
++ mem::align_of::<u8>()
++ );
++ assert_eq!(
++ mem::align_of::<__BindgenBitfieldUnit<[u8; 8], u16>>(),
++ mem::align_of::<u16>()
++ );
++ assert_eq!(
++ mem::align_of::<__BindgenBitfieldUnit<[u8; 8], u32>>(),
++ mem::align_of::<u32>()
++ );
++ assert_eq!(
++ mem::align_of::<__BindgenBitfieldUnit<[u8; 8], u64>>(),
++ mem::align_of::<u64>()
++ );
+ }
+
+ macro_rules! bitfield_unit_get {
+diff --git a/third_party/rust/bindgen/src/codegen/error.rs b/third_party/rust/bindgen/src/codegen/error.rs
+--- a/third_party/rust/bindgen/src/codegen/error.rs
++++ b/third_party/rust/bindgen/src/codegen/error.rs
+@@ -20,7 +20,7 @@
+ }
+
+ impl error::Error for Error {
+- fn cause(&self) -> Option<&error::Error> {
++ fn cause(&self) -> Option<&dyn error::Error> {
+ None
+ }
+
+diff --git a/third_party/rust/bindgen/src/codegen/helpers.rs b/third_party/rust/bindgen/src/codegen/helpers.rs
+--- a/third_party/rust/bindgen/src/codegen/helpers.rs
++++ b/third_party/rust/bindgen/src/codegen/helpers.rs
+@@ -17,14 +17,20 @@
+ }
+
+ pub fn repr_list(which_ones: &[&str]) -> TokenStream {
+- let which_ones = which_ones.iter().cloned().map(|one| TokenStream::from_str(one).expect("repr to be valid"));
++ let which_ones = which_ones
++ .iter()
++ .cloned()
++ .map(|one| TokenStream::from_str(one).expect("repr to be valid"));
+ quote! {
+ #[repr( #( #which_ones ),* )]
+ }
+ }
+
+ pub fn derives(which_ones: &[&str]) -> TokenStream {
+- let which_ones = which_ones.iter().cloned().map(|one| Ident::new(one, Span::call_site()));
++ let which_ones = which_ones
++ .iter()
++ .cloned()
++ .map(|one| Ident::new(one, Span::call_site()));
+ quote! {
+ #[derive( #( #which_ones ),* )]
+ }
+@@ -97,7 +103,10 @@
+ }
+
+ /// Integer type of the same size as the given `Layout`.
+-pub fn integer_type(ctx: &BindgenContext, layout: Layout) -> Option<TokenStream> {
++pub fn integer_type(
++ ctx: &BindgenContext,
++ layout: Layout,
++) -> Option<TokenStream> {
+ let name = Layout::known_type_for_size(ctx, layout.size)?;
+ let name = Ident::new(name, Span::call_site());
+ Some(quote! { #name })
+@@ -131,8 +140,8 @@
+ use ir::function::FunctionSig;
+ use ir::layout::Layout;
+ use ir::ty::FloatKind;
++ use proc_macro2::{self, TokenStream};
+ use std::str::FromStr;
+- use proc_macro2::{self, TokenStream};
+
+ pub fn raw_type(ctx: &BindgenContext, name: &str) -> TokenStream {
+ let ident = ctx.rust_ident_raw(name);
+@@ -171,7 +180,8 @@
+ 8 => quote! { f64 },
+ // TODO(emilio): If rust ever gains f128 we should
+ // use it here and below.
+- _ => super::integer_type(ctx, layout).unwrap_or(quote! { f64 }),
++ _ => super::integer_type(ctx, layout)
++ .unwrap_or(quote! { f64 }),
+ }
+ }
+ None => {
+@@ -219,10 +229,7 @@
+ }
+ }
+
+- pub fn float_expr(
+- ctx: &BindgenContext,
+- f: f64,
+- ) -> Result<TokenStream, ()> {
++ pub fn float_expr(ctx: &BindgenContext, f: f64) -> Result<TokenStream, ()> {
+ if f.is_finite() {
+ let val = proc_macro2::Literal::f64_unsuffixed(f);
+
+@@ -261,17 +268,16 @@
+ signature
+ .argument_types()
+ .iter()
+- .map(|&(ref name, _ty)| {
+- match *name {
+- Some(ref name) => {
+- let name = ctx.rust_ident(name);
+- quote! { #name }
+- }
+- None => {
+- unnamed_arguments += 1;
+- let name = ctx.rust_ident(format!("arg{}", unnamed_arguments));
+- quote! { #name }
+- }
++ .map(|&(ref name, _ty)| match *name {
++ Some(ref name) => {
++ let name = ctx.rust_ident(name);
++ quote! { #name }
++ }
++ None => {
++ unnamed_arguments += 1;
++ let name =
++ ctx.rust_ident(format!("arg{}", unnamed_arguments));
++ quote! { #name }
+ }
+ })
+ .collect()
+diff --git a/third_party/rust/bindgen/src/codegen/impl_debug.rs b/third_party/rust/bindgen/src/codegen/impl_debug.rs
+--- a/third_party/rust/bindgen/src/codegen/impl_debug.rs
++++ b/third_party/rust/bindgen/src/codegen/impl_debug.rs
+@@ -1,7 +1,7 @@
+ use ir::comp::{BitfieldUnit, CompKind, Field, FieldData, FieldMethods};
+ use ir::context::BindgenContext;
+ use ir::item::{HasTypeParamInArray, IsOpaque, Item, ItemCanonicalName};
+-use ir::ty::{RUST_DERIVE_IN_ARRAY_LIMIT, TypeKind};
++use ir::ty::{TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
+ use proc_macro2;
+
+ pub fn gen_debug_impl(
+@@ -177,9 +177,10 @@
+ TypeKind::Array(_, len) => {
+ // Generics are not required to implement Debug
+ if self.has_type_param_in_array(ctx) {
+- Some(
+- (format!("{}: Array with length {}", name, len), vec![]),
+- )
++ Some((
++ format!("{}: Array with length {}", name, len),
++ vec![],
++ ))
+ } else if len < RUST_DERIVE_IN_ARRAY_LIMIT {
+ // The simple case
+ debug_print(name, quote! { #name_ident })
+@@ -187,9 +188,7 @@
+ if ctx.options().use_core {
+ // There is no String in core; reducing field visibility to avoid breaking
+ // no_std setups.
+- Some((
+- format!("{}: [...]", name), vec![]
+- ))
++ Some((format!("{}: [...]", name), vec![]))
+ } else {
+ // Let's implement our own print function
+ Some((
+@@ -209,16 +208,14 @@
+ if ctx.options().use_core {
+ // There is no format! in core; reducing field visibility to avoid breaking
+ // no_std setups.
+- Some((
+- format!("{}(...)", name), vec![]
+- ))
++ Some((format!("{}(...)", name), vec![]))
+ } else {
+ let self_ids = 0..len;
+ Some((
+ format!("{}({{}})", name),
+ vec![quote! {
+ #(format!("{:?}", self.#self_ids)),*
+- }]
++ }],
+ ))
+ }
+ }
+@@ -235,8 +232,9 @@
+ let inner_type = ctx.resolve_type(inner).canonical_type(ctx);
+ match *inner_type.kind() {
+ TypeKind::Function(ref sig)
+- if !sig.function_pointers_can_derive() => {
+- Some((format!("{}: FunctionPointer", name), vec![]))
++ if !sig.function_pointers_can_derive() =>
++ {
++ Some((format!("{}: FunctionPointer", name), vec![]))
+ }
+ _ => debug_print(name, quote! { #name_ident }),
+ }
+diff --git a/third_party/rust/bindgen/src/codegen/impl_partialeq.rs b/third_party/rust/bindgen/src/codegen/impl_partialeq.rs
+--- a/third_party/rust/bindgen/src/codegen/impl_partialeq.rs
++++ b/third_party/rust/bindgen/src/codegen/impl_partialeq.rs
+@@ -1,4 +1,3 @@
+-
+ use ir::comp::{CompInfo, CompKind, Field, FieldMethods};
+ use ir::context::BindgenContext;
+ use ir::item::{IsOpaque, Item};
+@@ -50,15 +49,17 @@
+ let name = fd.name().unwrap();
+ tokens.push(gen_field(ctx, ty_item, name));
+ }
+- Field::Bitfields(ref bu) => for bitfield in bu.bitfields() {
+- if let Some(_) = bitfield.name() {
+- let getter_name = bitfield.getter_name();
+- let name_ident = ctx.rust_ident_raw(getter_name);
+- tokens.push(quote! {
+- self.#name_ident () == other.#name_ident ()
+- });
++ Field::Bitfields(ref bu) => {
++ for bitfield in bu.bitfields() {
++ if let Some(_) = bitfield.name() {
++ let getter_name = bitfield.getter_name();
++ let name_ident = ctx.rust_ident_raw(getter_name);
++ tokens.push(quote! {
++ self.#name_ident () == other.#name_ident ()
++ });
++ }
+ }
+- },
++ }
+ }
+ }
+ }
+@@ -70,8 +71,14 @@
+ })
+ }
+
+-fn gen_field(ctx: &BindgenContext, ty_item: &Item, name: &str) -> proc_macro2::TokenStream {
+- fn quote_equals(name_ident: proc_macro2::Ident) -> proc_macro2::TokenStream {
++fn gen_field(
++ ctx: &BindgenContext,
++ ty_item: &Item,
++ name: &str,
++) -> proc_macro2::TokenStream {
++ fn quote_equals(
++ name_ident: proc_macro2::Ident,
++ ) -> proc_macro2::TokenStream {
+ quote! { self.#name_ident == other.#name_ident }
+ }
+
+@@ -106,20 +113,22 @@
+ }
+ }
+
+- TypeKind::Array(_, len) => if len <= RUST_DERIVE_IN_ARRAY_LIMIT {
+- quote_equals(name_ident)
+- } else {
+- quote! {
+- &self. #name_ident [..] == &other. #name_ident [..]
++ TypeKind::Array(_, len) => {
++ if len <= RUST_DERIVE_IN_ARRAY_LIMIT {
++ quote_equals(name_ident)
++ } else {
++ quote! {
++ &self. #name_ident [..] == &other. #name_ident [..]
++ }
+ }
+- },
++ }
+ TypeKind::Vector(_, len) => {
+ let self_ids = 0..len;
+ let other_ids = 0..len;
+ quote! {
+ #(self.#self_ids == other.#other_ids &&)* true
+ }
+- },
++ }
+
+ TypeKind::ResolvedTypeRef(t) |
+ TypeKind::TemplateAlias(t, _) |
+diff --git a/third_party/rust/bindgen/src/codegen/mod.rs b/third_party/rust/bindgen/src/codegen/mod.rs
+--- a/third_party/rust/bindgen/src/codegen/mod.rs
++++ b/third_party/rust/bindgen/src/codegen/mod.rs
+@@ -1,7 +1,7 @@
++mod error;
++mod helpers;
+ mod impl_debug;
+ mod impl_partialeq;
+-mod error;
+-mod helpers;
+ pub mod struct_layout;
+
+ #[cfg(test)]
+@@ -18,12 +18,15 @@
+ use ir::analysis::{HasVtable, Sizedness};
+ use ir::annotations::FieldAccessorKind;
+ use ir::comment;
+-use ir::comp::{Base, Bitfield, BitfieldUnit, CompInfo, CompKind, Field,
+- FieldData, FieldMethods, Method, MethodKind};
++use ir::comp::{
++ Base, Bitfield, BitfieldUnit, CompInfo, CompKind, Field, FieldData,
++ FieldMethods, Method, MethodKind,
++};
+ use ir::context::{BindgenContext, ItemId};
+-use ir::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault,
+- CanDeriveHash, CanDerivePartialOrd, CanDeriveOrd,
+- CanDerivePartialEq, CanDeriveEq, CanDerive};
++use ir::derive::{
++ CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq,
++ CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd,
++};
+ use ir::dot;
+ use ir::enum_ty::{Enum, EnumVariant, EnumVariantValue};
+ use ir::function::{Abi, Function, FunctionKind, FunctionSig, Linkage};
+@@ -33,12 +36,14 @@
+ use ir::layout::Layout;
+ use ir::module::Module;
+ use ir::objc::{ObjCInterface, ObjCMethod};
+-use ir::template::{AsTemplateParam, TemplateInstantiation, TemplateParameters};
++use ir::template::{
++ AsTemplateParam, TemplateInstantiation, TemplateParameters,
++};
+ use ir::ty::{Type, TypeKind};
+ use ir::var::Var;
+
++use proc_macro2::{self, Ident, Span};
+ use quote::TokenStreamExt;
+-use proc_macro2::{self, Ident, Span};
+
+ use std;
+ use std::borrow::Cow;
+@@ -48,12 +53,15 @@
+ use std::iter;
+ use std::ops;
+ use std::str::FromStr;
+-use {HashMap, HashSet, Entry};
++use {Entry, HashMap, HashSet};
+
+ // Name of type defined in constified enum module
+ pub static CONSTIFIED_ENUM_MODULE_REPR_NAME: &'static str = "Type";
+
+-fn top_level_path(ctx: &BindgenContext, item: &Item) -> Vec<proc_macro2::TokenStream> {
++fn top_level_path(
++ ctx: &BindgenContext,
++ item: &Item,
++) -> Vec<proc_macro2::TokenStream> {
+ let mut path = vec![quote! { self }];
+
+ if ctx.options().enable_cxx_namespaces {
+@@ -65,7 +73,10 @@
+ path
+ }
+
+-fn root_import(ctx: &BindgenContext, module: &Item) -> proc_macro2::TokenStream {
++fn root_import(
++ ctx: &BindgenContext,
++ module: &Item,
++) -> proc_macro2::TokenStream {
+ assert!(ctx.options().enable_cxx_namespaces, "Somebody messed it up");
+ assert!(module.is_module());
+
+@@ -75,7 +86,6 @@
+ let root_ident = ctx.rust_ident(&root);
+ path.push(quote! { #root_ident });
+
+-
+ let mut tokens = quote! {};
+ tokens.append_separated(path, quote!(::));
+
+@@ -268,10 +278,7 @@
+ ctx: &BindgenContext,
+ item: &Item,
+ ) {
+- let item = item.id()
+- .into_resolver()
+- .through_type_refs()
+- .resolve(ctx);
++ let item = item.id().into_resolver().through_type_refs().resolve(ctx);
+
+ match *item.expect_type().kind() {
+ TypeKind::UnresolvedTypeRef(..) => {
+@@ -297,13 +304,17 @@
+ TypeKind::ObjCId |
+ TypeKind::ObjCSel |
+ TypeKind::TemplateInstantiation(..) => return,
+- _ => {},
++ _ => {}
+ }
+
+- let params: Vec<_> = item.used_template_params(ctx).iter().map(|p| {
+- p.try_to_rust_ty(ctx, &())
+- .expect("template params cannot fail to be a rust type")
+- }).collect();
++ let params: Vec<_> = item
++ .used_template_params(ctx)
++ .iter()
++ .map(|p| {
++ p.try_to_rust_ty(ctx, &())
++ .expect("template params cannot fail to be a rust type")
++ })
++ .collect();
+ if !params.is_empty() {
+ self.append_all(quote! {
+ < #( #params ),* >
+@@ -431,7 +442,9 @@
+ if let Some(raw_lines) = ctx.options().module_lines.get(&path) {
+ for raw_line in raw_lines {
+ found_any = true;
+- result.push(proc_macro2::TokenStream::from_str(raw_line).unwrap());
++ result.push(
++ proc_macro2::TokenStream::from_str(raw_line).unwrap(),
++ );
+ }
+ }
+
+@@ -500,7 +513,8 @@
+ });
+ }
+ VarType::Int(val) => {
+- let int_kind = self.ty()
++ let int_kind = self
++ .ty()
+ .into_resolver()
+ .through_type_aliases()
+ .through_type_refs()
+@@ -570,7 +584,7 @@
+ }
+
+ let maybe_mut = if self.is_const() {
+- quote! { }
++ quote! {}
+ } else {
+ quote! { mut }
+ };
+@@ -627,13 +641,14 @@
+ return;
+ }
+
+- let inner_item = inner.into_resolver()
+- .through_type_refs()
+- .resolve(ctx);
++ let inner_item =
++ inner.into_resolver().through_type_refs().resolve(ctx);
+ let name = item.canonical_name(ctx);
+
+ let inner_rust_type = {
+- if let TypeKind::Function(fnsig) = inner_item.kind().expect_type().kind() {
++ if let TypeKind::Function(fnsig) =
++ inner_item.kind().expect_type().kind()
++ {
+ utils::fnsig_block(ctx, fnsig)
+ } else {
+ panic!("invalid block typedef: {:?}", inner_item)
+@@ -656,16 +671,15 @@
+ result.saw_block();
+ }
+ TypeKind::Comp(ref ci) => ci.codegen(ctx, result, item),
+- TypeKind::TemplateAlias(inner, _) |
+- TypeKind::Alias(inner) => {
+- let inner_item = inner.into_resolver()
+- .through_type_refs()
+- .resolve(ctx);
++ TypeKind::TemplateAlias(inner, _) | TypeKind::Alias(inner) => {
++ let inner_item =
++ inner.into_resolver().through_type_refs().resolve(ctx);
+ let name = item.canonical_name(ctx);
+ let path = item.canonical_path(ctx);
+
+ {
+- let through_type_aliases = inner.into_resolver()
++ let through_type_aliases = inner
++ .into_resolver()
+ .through_type_refs()
+ .through_type_aliases()
+ .resolve(ctx);
+@@ -720,8 +734,7 @@
+ warn!(
+ "Item contained invalid named type, skipping: \
+ {:?}, {:?}",
+- item,
+- inner_item
++ item, inner_item
+ );
+ return;
+ }
+@@ -737,15 +750,12 @@
+
+ // We prefer using `pub use` over `pub type` because of:
+ // https://github.com/rust-lang/rust/issues/26264
+- if inner_rust_type.to_string()
+- .chars()
+- .all(|c| match c {
+- // These are the only characters allowed in simple
+- // paths, eg `good::dogs::Bront`.
+- 'A'...'Z' | 'a'...'z' | '0'...'9' | ':' | '_' | ' ' => true,
+- _ => false,
+- }) &&
+- outer_params.is_empty() &&
++ if inner_rust_type.to_string().chars().all(|c| match c {
++ // These are the only characters allowed in simple
++ // paths, eg `good::dogs::Bront`.
++ 'A'..='Z' | 'a'..='z' | '0'..='9' | ':' | '_' | ' ' => true,
++ _ => false,
++ }) && outer_params.is_empty() &&
+ inner_item.expect_type().canonical_type(ctx).is_enum()
+ {
+ tokens.append_all(quote! {
+@@ -764,10 +774,14 @@
+ pub type #rust_name
+ });
+
+- let params: Vec<_> = outer_params.into_iter()
++ let params: Vec<_> = outer_params
++ .into_iter()
+ .filter_map(|p| p.as_template_param(ctx, &()))
+ .collect();
+- if params.iter().any(|p| ctx.resolve_type(*p).is_invalid_type_param()) {
++ if params
++ .iter()
++ .any(|p| ctx.resolve_type(*p).is_invalid_type_param())
++ {
+ warn!(
+ "Item contained invalid template \
+ parameter: {:?}",
+@@ -775,10 +789,14 @@
+ );
+ return;
+ }
+- let params: Vec<_> = params.iter().map(|p| {
+- p.try_to_rust_ty(ctx, &())
+- .expect("type parameters can always convert to rust ty OK")
+- }).collect();
++ let params: Vec<_> = params
++ .iter()
++ .map(|p| {
++ p.try_to_rust_ty(ctx, &()).expect(
++ "type parameters can always convert to rust ty OK",
++ )
++ })
++ .collect();
+
+ if !params.is_empty() {
+ tokens.append_all(quote! {
+@@ -1038,7 +1056,8 @@
+ // `BitfieldUnit` and `Bitfield`.
+ assert!(self.bitfield_width().is_none());
+
+- let field_item = self.ty().into_resolver().through_type_refs().resolve(ctx);
++ let field_item =
++ self.ty().into_resolver().through_type_refs().resolve(ctx);
+ let field_ty = field_item.expect_type();
+ let mut ty = self.ty().to_rust_ty_or_opaque(ctx, &());
+ ty.append_implicit_template_params(ctx, field_item);
+@@ -1082,10 +1101,10 @@
+ }
+ }
+
+- let field_name =
+- self.name()
+- .map(|name| ctx.rust_mangle(name).into_owned())
+- .expect("Each field should have a name in codegen!");
++ let field_name = self
++ .name()
++ .map(|name| ctx.rust_mangle(name).into_owned())
++ .expect("Each field should have a name in codegen!");
+ let field_ident = ctx.rust_ident_raw(field_name.as_str());
+
+ if !parent.is_union() {
+@@ -1096,9 +1115,10 @@
+ }
+ }
+
+- let is_private = self.annotations().private_fields().unwrap_or(
+- fields_should_be_private,
+- );
++ let is_private = self
++ .annotations()
++ .private_fields()
++ .unwrap_or(fields_should_be_private);
+
+ let accessor_kind =
+ self.annotations().accessor_kind().unwrap_or(accessor_kind);
+@@ -1168,7 +1188,10 @@
+ impl BitfieldUnit {
+ /// Get the constructor name for this bitfield unit.
+ fn ctor_name(&self) -> proc_macro2::TokenStream {
+- let ctor_name = Ident::new(&format!("new_bitfield_{}", self.nth()), Span::call_site());
++ let ctor_name = Ident::new(
++ &format!("new_bitfield_{}", self.nth()),
++ Span::call_site(),
++ );
+ quote! {
+ #ctor_name
+ }
+@@ -1190,9 +1213,9 @@
+ mut ctor_impl: proc_macro2::TokenStream,
+ ) -> proc_macro2::TokenStream {
+ let bitfield_ty = ctx.resolve_type(self.ty());
+- let bitfield_ty_layout = bitfield_ty.layout(ctx).expect(
+- "Bitfield without layout? Gah!",
+- );
++ let bitfield_ty_layout = bitfield_ty
++ .layout(ctx)
++ .expect("Bitfield without layout? Gah!");
+ let bitfield_int_ty = helpers::blob(ctx, bitfield_ty_layout);
+
+ let offset = self.offset_into_unit();
+@@ -1306,11 +1329,7 @@
+ ctor_params.push(quote! {
+ #param_name : #bitfield_ty
+ });
+- ctor_impl = bf.extend_ctor_impl(
+- ctx,
+- param_name,
+- ctor_impl,
+- );
++ ctor_impl = bf.extend_ctor_impl(ctx, param_name, ctor_impl);
+ }
+
+ if generate_ctor {
+@@ -1373,19 +1392,20 @@
+ let bitfield_ty_item = ctx.resolve_item(self.ty());
+ let bitfield_ty = bitfield_ty_item.expect_type();
+
+- let bitfield_ty_layout = bitfield_ty.layout(ctx).expect(
+- "Bitfield without layout? Gah!",
+- );
+- let bitfield_int_ty = match helpers::integer_type(ctx, bitfield_ty_layout) {
+- Some(int_ty) => {
+- *bitfield_representable_as_int = true;
+- int_ty
+- }
+- None => {
+- *bitfield_representable_as_int = false;
+- return;
+- }
+- };
++ let bitfield_ty_layout = bitfield_ty
++ .layout(ctx)
++ .expect("Bitfield without layout? Gah!");
++ let bitfield_int_ty =
++ match helpers::integer_type(ctx, bitfield_ty_layout) {
++ Some(int_ty) => {
++ *bitfield_representable_as_int = true;
++ int_ty
++ }
++ None => {
++ *bitfield_representable_as_int = false;
++ return;
++ }
++ };
+
+ let bitfield_ty =
+ bitfield_ty.to_rust_ty_or_opaque(ctx, bitfield_ty_item);
+@@ -1529,7 +1549,8 @@
+ let codegen_depth = item.codegen_depth(ctx);
+ let fields_should_be_private =
+ item.annotations().private_fields().unwrap_or(false);
+- let struct_accessor_kind = item.annotations()
++ let struct_accessor_kind = item
++ .annotations()
+ .accessor_kind()
+ .unwrap_or(FieldAccessorKind::None);
+ for field in self.fields() {
+@@ -1584,7 +1605,10 @@
+ } else {
+ explicit_align = Some(layout.align);
+ if !ctx.options().rust_features.repr_align {
+- let ty = helpers::blob(ctx, Layout::new(0, layout.align));
++ let ty = helpers::blob(
++ ctx,
++ Layout::new(0, layout.align),
++ );
+ fields.push(quote! {
+ pub __bindgen_align: #ty ,
+ });
+@@ -1668,7 +1692,7 @@
+ < #( #generic_param_names ),* >
+ }
+ } else {
+- quote! { }
++ quote! {}
+ };
+
+ let mut attributes = vec![];
+@@ -1682,7 +1706,11 @@
+ if packed && !is_opaque {
+ let n = layout.map_or(1, |l| l.align);
+ assert!(ctx.options().rust_features().repr_packed_n || n == 1);
+- let packed_repr = if n == 1 { "packed".to_string() } else { format!("packed({})", n) };
++ let packed_repr = if n == 1 {
++ "packed".to_string()
++ } else {
++ format!("packed({})", n)
++ };
+ attributes.push(attributes::repr_list(&["C", &packed_repr]));
+ } else {
+ attributes.push(attributes::repr("C"));
+@@ -1699,13 +1727,12 @@
+ }
+ }
+
+-
+ let mut derives = vec![];
+ if item.can_derive_debug(ctx) {
+ derives.push("Debug");
+ } else {
+- needs_debug_impl = ctx.options().derive_debug &&
+- ctx.options().impl_debug
++ needs_debug_impl =
++ ctx.options().derive_debug && ctx.options().impl_debug
+ }
+
+ if item.can_derive_default(ctx) {
+@@ -1750,10 +1777,10 @@
+ if item.can_derive_partialeq(ctx) {
+ derives.push("PartialEq");
+ } else {
+- needs_partialeq_impl =
+- ctx.options().derive_partialeq &&
++ needs_partialeq_impl = ctx.options().derive_partialeq &&
+ ctx.options().impl_partialeq &&
+- ctx.lookup_can_derive_partialeq_or_partialord(item.id()) == CanDerive::Manually;
++ ctx.lookup_can_derive_partialeq_or_partialord(item.id()) ==
++ CanDerive::Manually;
+ }
+
+ if item.can_derive_eq(ctx) {
+@@ -1827,34 +1854,36 @@
+ let size = layout.size;
+ let align = layout.align;
+
+- let check_struct_align =
+- if align > ctx.target_pointer_size() &&
+- !ctx.options().rust_features().repr_align
+- {
+- None
+- } else {
+- Some(quote! {
+- assert_eq!(#align_of_expr,
+- #align,
+- concat!("Alignment of ", stringify!(#canonical_ident)));
+-
+- })
+- };
++ let check_struct_align = if align >
++ ctx.target_pointer_size() &&
++ !ctx.options().rust_features().repr_align
++ {
++ None
++ } else {
++ Some(quote! {
++ assert_eq!(#align_of_expr,
++ #align,
++ concat!("Alignment of ", stringify!(#canonical_ident)));
++
++ })
++ };
+
+ // FIXME when [issue #465](https://github.com/rust-lang/rust-bindgen/issues/465) ready
+- let too_many_base_vtables = self.base_members()
++ let too_many_base_vtables = self
++ .base_members()
+ .iter()
+ .filter(|base| base.ty.has_vtable(ctx))
+- .count() > 1;
+-
+- let should_skip_field_offset_checks = is_opaque ||
+- too_many_base_vtables;
+-
+- let check_field_offset =
+- if should_skip_field_offset_checks {
+- vec![]
+- } else {
+- let asserts = self.fields()
++ .count() >
++ 1;
++
++ let should_skip_field_offset_checks =
++ is_opaque || too_many_base_vtables;
++
++ let check_field_offset = if should_skip_field_offset_checks
++ {
++ vec![]
++ } else {
++ let asserts = self.fields()
+ .iter()
+ .filter_map(|field| match *field {
+ Field::DataMember(ref f) if f.name().is_some() => Some(f),
+@@ -1879,8 +1908,8 @@
+ })
+ .collect::<Vec<proc_macro2::TokenStream>>();
+
+- asserts
+- };
++ asserts
++ };
+
+ let item = quote! {
+ #[test]
+@@ -1918,7 +1947,8 @@
+ *sig,
+ /* const */
+ false,
+- ).codegen_method(
++ )
++ .codegen_method(
+ ctx,
+ &mut methods,
+ &mut method_names,
+@@ -1983,15 +2013,19 @@
+ }
+
+ if needs_partialeq_impl {
+- if let Some(impl_) = impl_partialeq::gen_partialeq_impl(ctx, self, item, &ty_for_impl) {
+-
++ if let Some(impl_) = impl_partialeq::gen_partialeq_impl(
++ ctx,
++ self,
++ item,
++ &ty_for_impl,
++ ) {
+ let partialeq_bounds = if !generic_param_names.is_empty() {
+ let bounds = generic_param_names.iter().map(|t| {
+ quote! { #t: PartialEq }
+ });
+ quote! { where #( #bounds ),* }
+ } else {
+- quote! { }
++ quote! {}
+ };
+
+ let prefix = ctx.trait_prefix();
+@@ -2067,7 +2101,9 @@
+ _ => panic!("How in the world?"),
+ };
+
+- if let (Abi::ThisCall, false) = (signature.abi(), ctx.options().rust_features().thiscall_abi) {
++ if let (Abi::ThisCall, false) =
++ (signature.abi(), ctx.options().rust_features().thiscall_abi)
++ {
+ return;
+ }
+
+@@ -2118,10 +2154,9 @@
+ // variable called `__bindgen_tmp` we're going to create.
+ if self.is_constructor() {
+ let prefix = ctx.trait_prefix();
+- let tmp_variable_decl =
+- quote! {
+- let mut __bindgen_tmp = ::#prefix::mem::uninitialized()
+- };
++ let tmp_variable_decl = quote! {
++ let mut __bindgen_tmp = ::#prefix::mem::uninitialized()
++ };
+ stmts.push(tmp_variable_decl);
+ exprs[0] = quote! {
+ &mut __bindgen_tmp
+@@ -2152,7 +2187,9 @@
+ let mut attrs = vec![];
+ attrs.push(attributes::inline());
+
+- if signature.must_use() && ctx.options().rust_features().must_use_function {
++ if signature.must_use() &&
++ ctx.options().rust_features().must_use_function
++ {
+ attrs.push(attributes::must_use());
+ }
+
+@@ -2172,21 +2209,21 @@
+ /// The code for this enum will use a Rust enum
+ Rust {
+ /// Indicates whether the generated struct should be #[non_exhaustive]
+- non_exhaustive: bool
++ non_exhaustive: bool,
+ },
+ /// The code for this enum will use a bitfield
+ Bitfield,
+ /// The code for this enum will use consts
+ Consts,
+ /// The code for this enum will use a module containing consts
+- ModuleConsts
++ ModuleConsts,
+ }
+
+ impl EnumVariation {
+ fn is_rust(&self) -> bool {
+ match *self {
+- EnumVariation::Rust{ .. } => true,
+- _ => false
++ EnumVariation::Rust { .. } => true,
++ _ => false,
+ }
+ }
+
+@@ -2195,7 +2232,7 @@
+ fn is_const(&self) -> bool {
+ match *self {
+ EnumVariation::Consts | EnumVariation::ModuleConsts => true,
+- _ => false
++ _ => false,
+ }
+ }
+ }
+@@ -2225,7 +2262,6 @@
+ }
+ }
+
+-
+ /// A helper type to construct different enum variations.
+ enum EnumBuilder<'a> {
+ Rust {
+@@ -2274,16 +2310,14 @@
+ let ident = Ident::new(name, Span::call_site());
+
+ match enum_variation {
+- EnumVariation::Bitfield => {
+- EnumBuilder::Bitfield {
+- codegen_depth: enum_codegen_depth,
+- canonical_name: name,
+- tokens: quote! {
+- #( #attrs )*
+- pub struct #ident (pub #repr);
+- },
+- }
+- }
++ EnumVariation::Bitfield => EnumBuilder::Bitfield {
++ codegen_depth: enum_codegen_depth,
++ canonical_name: name,
++ tokens: quote! {
++ #( #attrs )*
++ pub struct #ident (pub #repr);
++ },
++ },
+
+ EnumVariation::Rust { .. } => {
+ let tokens = quote!();
+@@ -2296,20 +2330,19 @@
+ }
+ }
+
+- EnumVariation::Consts => {
+- EnumBuilder::Consts {
+- variants: vec![
+- quote! {
+- #( #attrs )*
+- pub type #ident = #repr;
+- }
+- ],
+- codegen_depth: enum_codegen_depth,
+- }
+- }
++ EnumVariation::Consts => EnumBuilder::Consts {
++ variants: vec![quote! {
++ #( #attrs )*
++ pub type #ident = #repr;
++ }],
++ codegen_depth: enum_codegen_depth,
++ },
+
+ EnumVariation::ModuleConsts => {
+- let ident = Ident::new(CONSTIFIED_ENUM_MODULE_REPR_NAME, Span::call_site());
++ let ident = Ident::new(
++ CONSTIFIED_ENUM_MODULE_REPR_NAME,
++ Span::call_site(),
++ );
+ let type_definition = quote! {
+ #( #attrs )*
+ pub type #ident = #repr;
+@@ -2343,13 +2376,20 @@
+ let mut doc = quote! {};
+ if ctx.options().generate_comments {
+ if let Some(raw_comment) = variant.comment() {
+- let comment = comment::preprocess(raw_comment, self.codegen_depth());
++ let comment =
++ comment::preprocess(raw_comment, self.codegen_depth());
+ doc = attributes::doc(comment);
+ }
+ }
+
+ match self {
+- EnumBuilder::Rust { attrs, ident, tokens, emitted_any_variants: _, codegen_depth } => {
++ EnumBuilder::Rust {
++ attrs,
++ ident,
++ tokens,
++ emitted_any_variants: _,
++ codegen_depth,
++ } => {
+ let name = ctx.rust_ident(variant_name);
+ EnumBuilder::Rust {
+ attrs,
+@@ -2365,7 +2405,8 @@
+ }
+
+ EnumBuilder::Bitfield { canonical_name, .. } => {
+- if ctx.options().rust_features().associated_const && is_ty_named {
++ if ctx.options().rust_features().associated_const && is_ty_named
++ {
+ let enum_ident = ctx.rust_ident(canonical_name);
+ let variant_ident = ctx.rust_ident(variant_name);
+ result.push(quote! {
+@@ -2390,9 +2431,7 @@
+ self
+ }
+
+- EnumBuilder::Consts {
+- ..
+- } => {
++ EnumBuilder::Consts { .. } => {
+ let constant_name = match mangling_prefix {
+ Some(prefix) => {
+ Cow::Owned(format!("{}_{}", prefix, variant_name))
+@@ -2436,7 +2475,13 @@
+ result: &mut CodegenResult<'b>,
+ ) -> proc_macro2::TokenStream {
+ match self {
+- EnumBuilder::Rust { attrs, ident, tokens, emitted_any_variants, .. } => {
++ EnumBuilder::Rust {
++ attrs,
++ ident,
++ tokens,
++ emitted_any_variants,
++ ..
++ } => {
+ let variants = if !emitted_any_variants {
+ quote!(__bindgen_cannot_repr_c_on_empty_enum = 0)
+ } else {
+@@ -2536,12 +2581,10 @@
+
+ let repr = self.repr().map(|repr| ctx.resolve_type(repr));
+ let repr = match repr {
+- Some(repr) => {
+- match *repr.canonical_type(ctx).kind() {
+- TypeKind::Int(int_kind) => int_kind,
+- _ => panic!("Unexpected type as enum repr"),
+- }
+- }
++ Some(repr) => match *repr.canonical_type(ctx).kind() {
++ TypeKind::Int(int_kind) => int_kind,
++ _ => panic!("Unexpected type as enum repr"),
++ },
+ None => {
+ warn!(
+ "Guessing type of enum! Forward declarations of enums \
+@@ -2580,20 +2623,24 @@
+ match variation {
+ EnumVariation::Rust { non_exhaustive } => {
+ attrs.push(attributes::repr(repr_name));
+- if non_exhaustive && ctx.options().rust_features().non_exhaustive {
++ if non_exhaustive &&
++ ctx.options().rust_features().non_exhaustive
++ {
+ attrs.push(attributes::non_exhaustive());
+- } else if non_exhaustive && !ctx.options().rust_features().non_exhaustive {
++ } else if non_exhaustive &&
++ !ctx.options().rust_features().non_exhaustive
++ {
+ panic!("The rust target you're using doesn't seem to support non_exhaustive enums");
+ }
+- },
++ }
+ EnumVariation::Bitfield => {
+ if ctx.options().rust_features.repr_transparent {
+ attrs.push(attributes::repr("transparent"));
+ } else {
+ attrs.push(attributes::repr("C"));
+ }
+- },
+- _ => {},
++ }
++ _ => {}
+ };
+
+ if let Some(comment) = item.comment(ctx) {
+@@ -2601,7 +2648,8 @@
+ }
+
+ if !variation.is_const() {
+- let mut derives = vec!["Debug", "Copy", "Clone", "PartialEq", "Eq", "Hash"];
++ let mut derives =
++ vec!["Debug", "Copy", "Clone", "PartialEq", "Eq", "Hash"];
+
+ if item.can_derive_partialord(ctx) {
+ derives.push("PartialOrd");
+@@ -2685,9 +2733,9 @@
+ let mut constified_variants = VecDeque::new();
+
+ let mut iter = self.variants().iter().peekable();
+- while let Some(variant) = iter.next().or_else(|| {
+- constified_variants.pop_front()
+- }) {
++ while let Some(variant) =
++ iter.next().or_else(|| constified_variants.pop_front())
++ {
+ if variant.hidden() {
+ continue;
+ }
+@@ -2708,17 +2756,20 @@
+ let parent_name =
+ parent_canonical_name.as_ref().unwrap();
+
+- Cow::Owned(
+- format!("{}_{}", parent_name, variant_name),
+- )
++ Cow::Owned(format!(
++ "{}_{}",
++ parent_name, variant_name
++ ))
+ };
+
+ let existing_variant_name = entry.get();
+ // Use associated constants for named enums.
+ if enum_ty.name().is_some() &&
+- ctx.options().rust_features().associated_const {
++ ctx.options().rust_features().associated_const
++ {
+ let enum_canonical_name = &ident;
+- let variant_name = ctx.rust_ident_raw(&*mangled_name);
++ let variant_name =
++ ctx.rust_ident_raw(&*mangled_name);
+ result.push(quote! {
+ impl #enum_rust_ty {
+ pub const #variant_name : #enum_rust_ty =
+@@ -2772,12 +2823,8 @@
+ parent_canonical_name.as_ref().unwrap();
+
+ Ident::new(
+- &format!(
+- "{}_{}",
+- parent_name,
+- variant_name
+- ),
+- Span::call_site()
++ &format!("{}_{}", parent_name, variant_name),
++ Span::call_site(),
+ )
+ };
+
+@@ -2823,9 +2870,8 @@
+ ctx: &BindgenContext,
+ extra: &Self::Extra,
+ ) -> error::Result<proc_macro2::TokenStream> {
+- self.try_get_layout(ctx, extra).map(|layout| {
+- helpers::blob(ctx, layout)
+- })
++ self.try_get_layout(ctx, extra)
++ .map(|layout| helpers::blob(ctx, layout))
+ }
+ }
+
+@@ -2855,11 +2901,7 @@
+ }
+ }
+
+-impl<T> ToOpaque for T
+-where
+- T: TryToOpaque,
+-{
+-}
++impl<T> ToOpaque for T where T: TryToOpaque {}
+
+ /// Fallible conversion from an IR thing to an *equivalent* Rust type.
+ ///
+@@ -2895,8 +2937,7 @@
+
+ impl<E, T> TryToRustTyOrOpaque for T
+ where
+- T: TryToRustTy<Extra = E>
+- + TryToOpaque<Extra = E>,
++ T: TryToRustTy<Extra = E> + TryToOpaque<Extra = E>,
+ {
+ type Extra = E;
+
+@@ -2905,15 +2946,13 @@
+ ctx: &BindgenContext,
+ extra: &E,
+ ) -> error::Result<proc_macro2::TokenStream> {
+- self.try_to_rust_ty(ctx, extra).or_else(
+- |_| if let Ok(layout) =
+- self.try_get_layout(ctx, extra)
+- {
++ self.try_to_rust_ty(ctx, extra).or_else(|_| {
++ if let Ok(layout) = self.try_get_layout(ctx, extra) {
+ Ok(helpers::blob(ctx, layout))
+ } else {
+ Err(error::Error::NoLayoutForOpaqueBlob)
+- },
+- )
++ }
++ })
+ }
+ }
+
+@@ -2955,15 +2994,14 @@
+ ctx: &BindgenContext,
+ extra: &E,
+ ) -> proc_macro2::TokenStream {
+- self.try_to_rust_ty(ctx, extra).unwrap_or_else(|_| {
+- self.to_opaque(ctx, extra)
+- })
++ self.try_to_rust_ty(ctx, extra)
++ .unwrap_or_else(|_| self.to_opaque(ctx, extra))
+ }
+ }
+
+ impl<T> TryToOpaque for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ type Extra = ();
+
+@@ -2978,7 +3016,7 @@
+
+ impl<T> TryToRustTy for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ type Extra = ();
+
+@@ -3041,15 +3079,11 @@
+ TypeKind::Void => Ok(raw_type(ctx, "c_void")),
+ // TODO: we should do something smart with nullptr, or maybe *const
+ // c_void is enough?
+- TypeKind::NullPtr => {
+- Ok(raw_type(ctx, "c_void").to_ptr(true))
+- }
++ TypeKind::NullPtr => Ok(raw_type(ctx, "c_void").to_ptr(true)),
+ TypeKind::Int(ik) => {
+ match ik {
+ IntKind::Bool => Ok(quote! { bool }),
+- IntKind::Char {
+- ..
+- } => Ok(raw_type(ctx, "c_char")),
++ IntKind::Char { .. } => Ok(raw_type(ctx, "c_char")),
+ IntKind::SChar => Ok(raw_type(ctx, "c_schar")),
+ IntKind::UChar => Ok(raw_type(ctx, "c_uchar")),
+ IntKind::Short => Ok(raw_type(ctx, "c_short")),
+@@ -3061,12 +3095,14 @@
+ IntKind::LongLong => Ok(raw_type(ctx, "c_longlong")),
+ IntKind::ULongLong => Ok(raw_type(ctx, "c_ulonglong")),
+ IntKind::WChar => {
+- let layout = self.layout(ctx).expect("Couldn't compute wchar_t's layout?");
++ let layout = self
++ .layout(ctx)
++ .expect("Couldn't compute wchar_t's layout?");
+ let ty = Layout::known_type_for_size(ctx, layout.size)
+ .expect("Non-representable wchar_t?");
+ let ident = ctx.rust_ident_raw(ty);
+ Ok(quote! { #ident })
+- },
++ }
+
+ IntKind::I8 => Ok(quote! { i8 }),
+ IntKind::U8 => Ok(quote! { u8 }),
+@@ -3076,9 +3112,7 @@
+ IntKind::U32 => Ok(quote! { u32 }),
+ IntKind::I64 => Ok(quote! { i64 }),
+ IntKind::U64 => Ok(quote! { u64 }),
+- IntKind::Custom {
+- name, ..
+- } => {
++ IntKind::Custom { name, .. } => {
+ let ident = ctx.rust_ident_raw(name);
+ Ok(quote! {
+ #ident
+@@ -3102,9 +3136,12 @@
+ }
+ }
+ }
+- TypeKind::Float(fk) => Ok(float_kind_rust_type(ctx, fk, self.layout(ctx))),
++ TypeKind::Float(fk) => {
++ Ok(float_kind_rust_type(ctx, fk, self.layout(ctx)))
++ }
+ TypeKind::Complex(fk) => {
+- let float_path = float_kind_rust_type(ctx, fk, self.layout(ctx));
++ let float_path =
++ float_kind_rust_type(ctx, fk, self.layout(ctx));
+
+ ctx.generated_bindgen_complex();
+ Ok(if ctx.options().enable_cxx_namespaces {
+@@ -3137,7 +3174,8 @@
+ }
+ TypeKind::Enum(..) => {
+ let path = item.namespace_aware_canonical_path(ctx);
+- let path = proc_macro2::TokenStream::from_str(&path.join("::")).unwrap();
++ let path = proc_macro2::TokenStream::from_str(&path.join("::"))
++ .unwrap();
+ Ok(quote!(#path))
+ }
+ TypeKind::TemplateInstantiation(ref inst) => {
+@@ -3151,14 +3189,18 @@
+ let void = raw_type(ctx, "c_void");
+ return Ok(void.to_ptr(/* is_const = */ false));
+ }
+- let template_params = item.used_template_params(ctx)
++ let template_params = item
++ .used_template_params(ctx)
+ .into_iter()
+ .filter(|param| param.is_template_param(ctx, &()))
+ .collect::<Vec<_>>();
+
+ if item.is_opaque(ctx, &()) && !template_params.is_empty() {
+ self.try_to_opaque(ctx, item)
+- } else if let Some(ty) = self.name().and_then(|name| utils::type_from_named(ctx, name)) {
++ } else if let Some(ty) = self
++ .name()
++ .and_then(|name| utils::type_from_named(ctx, name))
++ {
+ Ok(ty)
+ } else {
+ utils::build_path(item, ctx)
+@@ -3175,11 +3217,11 @@
+ utils::build_path(item, ctx)
+ }
+ TypeKind::Opaque => self.try_to_opaque(ctx, item),
+- TypeKind::Pointer(inner) |
+- TypeKind::Reference(inner) => {
++ TypeKind::Pointer(inner) | TypeKind::Reference(inner) => {
+ let is_const = ctx.resolve_type(inner).is_const();
+
+- let inner = inner.into_resolver().through_type_refs().resolve(ctx);
++ let inner =
++ inner.into_resolver().through_type_refs().resolve(ctx);
+ let inner_ty = inner.expect_type();
+
+ // Regardless if we can properly represent the inner type, we
+@@ -3203,13 +3245,10 @@
+ #ident
+ })
+ }
+- TypeKind::ObjCSel => {
+- Ok(quote! {
+- objc::runtime::Sel
+- })
+- }
+- TypeKind::ObjCId |
+- TypeKind::ObjCInterface(..) => Ok(quote! {
++ TypeKind::ObjCSel => Ok(quote! {
++ objc::runtime::Sel
++ }),
++ TypeKind::ObjCId | TypeKind::ObjCInterface(..) => Ok(quote! {
+ id
+ }),
+ ref u @ TypeKind::UnresolvedTypeRef(..) => {
+@@ -3227,9 +3266,9 @@
+ ctx: &BindgenContext,
+ item: &Item,
+ ) -> error::Result<Layout> {
+- item.expect_type().layout(ctx).ok_or(
+- error::Error::NoLayoutForOpaqueBlob,
+- )
++ item.expect_type()
++ .layout(ctx)
++ .ok_or(error::Error::NoLayoutForOpaqueBlob)
+ }
+ }
+
+@@ -3245,23 +3284,25 @@
+ return Err(error::Error::InstantiationOfOpaqueType);
+ }
+
+- let def = self.template_definition()
++ let def = self
++ .template_definition()
+ .into_resolver()
+ .through_type_refs()
+ .resolve(ctx);
+
+ let mut ty = quote! {};
+ let def_path = def.namespace_aware_canonical_path(ctx);
+- ty.append_separated(def_path.into_iter().map(|p| ctx.rust_ident(p)), quote!(::));
++ ty.append_separated(
++ def_path.into_iter().map(|p| ctx.rust_ident(p)),
++ quote!(::),
++ );
+
+ let def_params = def.self_template_params(ctx);
+ if def_params.is_empty() {
+ // This can happen if we generated an opaque type for a partial
+ // template specialization, and we've hit an instantiation of
+ // that partial specialization.
+- extra_assert!(
+- def.is_opaque(ctx, &())
+- );
++ extra_assert!(def.is_opaque(ctx, &()));
+ return Err(error::Error::InstantiationOfOpaqueType);
+ }
+
+@@ -3273,11 +3314,12 @@
+ // reconstruct them somehow. We don't have any means of doing
+ // that reconstruction at this time.
+
+- let template_args = self.template_arguments()
++ let template_args = self
++ .template_arguments()
+ .iter()
+ .zip(def_params.iter())
+- // Only pass type arguments for the type parameters that
+- // the def uses.
++ // Only pass type arguments for the type parameters that
++ // the def uses.
+ .filter(|&(_, param)| ctx.uses_template_parameter(def.id(), *param))
+ .map(|(arg, _)| {
+ let arg = arg.into_resolver().through_type_refs().resolve(ctx);
+@@ -3315,11 +3357,9 @@
+ warn!("Skipping function with thiscall ABI that isn't supported by the configured Rust target");
+ Ok(proc_macro2::TokenStream::new())
+ }
+- _ => {
+- Ok(quote! {
+- unsafe extern #abi fn ( #( #arguments ),* ) #ret
+- })
+- }
++ _ => Ok(quote! {
++ unsafe extern #abi fn ( #( #arguments ),* ) #ret
++ }),
+ }
+ }
+ }
+@@ -3346,10 +3386,12 @@
+ // Pure virtual methods have no actual symbol, so we can't generate
+ // something meaningful for them.
+ match self.kind() {
+- FunctionKind::Method(ref method_kind) if method_kind.is_pure_virtual() => {
++ FunctionKind::Method(ref method_kind)
++ if method_kind.is_pure_virtual() =>
++ {
+ return;
+ }
+- _ => {},
++ _ => {}
+ }
+
+ // Similar to static member variables in a class template, we can't
+@@ -3387,7 +3429,9 @@
+
+ let mut attributes = vec![];
+
+- if signature.must_use() && ctx.options().rust_features().must_use_function {
++ if signature.must_use() &&
++ ctx.options().rust_features().must_use_function
++ {
+ attributes.push(attributes::must_use());
+ }
+
+@@ -3414,9 +3458,7 @@
+ Abi::Unknown(unknown_abi) => {
+ panic!(
+ "Invalid or unknown abi {:?} for function {:?} ({:?})",
+- unknown_abi,
+- canonical_name,
+- self
++ unknown_abi, canonical_name, self
+ );
+ }
+ abi => abi,
+@@ -3440,7 +3482,6 @@
+ }
+ }
+
+-
+ fn objc_method_codegen(
+ ctx: &BindgenContext,
+ method: &ObjCMethod,
+@@ -3458,8 +3499,7 @@
+ }
+ } else {
+ let fn_args = fn_args.clone();
+- let args = iter::once(quote! { self })
+- .chain(fn_args.into_iter());
++ let args = iter::once(quote! { self }).chain(fn_args.into_iter());
+ quote! {
+ ( #( #args ),* ) #fn_ret
+ }
+@@ -3471,7 +3511,10 @@
+ let class_name = class_name
+ .expect("Generating a class method without class name?")
+ .to_owned();
+- let expect_msg = proc_macro2::Literal::string(&format!("Couldn't find {}", class_name));
++ let expect_msg = proc_macro2::Literal::string(&format!(
++ "Couldn't find {}",
++ class_name
++ ));
+ quote! {
+ msg_send!(objc::runtime::Class::get(#class_name).expect(#expect_msg), #methods_and_args)
+ }
+@@ -3481,7 +3524,8 @@
+ }
+ };
+
+- let method_name = ctx.rust_ident(format!("{}{}", prefix, method.rust_name()));
++ let method_name =
++ ctx.rust_ident(format!("{}{}", prefix, method.rust_name()));
+
+ (
+ quote! {
+@@ -3491,7 +3535,7 @@
+ },
+ quote! {
+ unsafe fn #method_name #sig ;
+- }
++ },
+ )
+ }
+
+@@ -3516,12 +3560,8 @@
+ trait_items.push(trait_item)
+ }
+
+- let instance_method_names: Vec<_> = self.methods()
+- .iter()
+- .map({
+- |m| m.rust_name()
+- })
+- .collect();
++ let instance_method_names: Vec<_> =
++ self.methods().iter().map({ |m| m.rust_name() }).collect();
+
+ for class_method in self.class_methods() {
+ let ambiquity =
+@@ -3560,7 +3600,9 @@
+ }
+ }
+
+-pub(crate) fn codegen(context: BindgenContext) -> (Vec<proc_macro2::TokenStream>, BindgenOptions) {
++pub(crate) fn codegen(
++ context: BindgenContext,
++) -> (Vec<proc_macro2::TokenStream>, BindgenOptions) {
+ context.gen(|context| {
+ let _t = context.timer("codegen");
+ let counter = Cell::new(0);
+@@ -3579,20 +3621,26 @@
+
+ if let Some(path) = context.options().emit_ir_graphviz.as_ref() {
+ match dot::write_dot_file(context, path) {
+- Ok(()) => info!("Your dot file was generated successfully into: {}", path),
++ Ok(()) => info!(
++ "Your dot file was generated successfully into: {}",
++ path
++ ),
+ Err(e) => error!("{}", e),
+ }
+ }
+
+- context.resolve_item(context.root_module())
+- .codegen(context, &mut result, &());
++ context.resolve_item(context.root_module()).codegen(
++ context,
++ &mut result,
++ &(),
++ );
+
+ result.items
+ })
+ }
+
+ mod utils {
+- use super::{ToRustTyOrOpaque, error};
++ use super::{error, ToRustTyOrOpaque};
+ use ir::context::BindgenContext;
+ use ir::function::{Abi, FunctionSig};
+ use ir::item::{Item, ItemCanonicalPath};
+@@ -3604,7 +3652,7 @@
+
+ pub fn prepend_bitfield_unit_type(
+ ctx: &BindgenContext,
+- result: &mut Vec<proc_macro2::TokenStream>
++ result: &mut Vec<proc_macro2::TokenStream>,
+ ) {
+ let bitfield_unit_src = include_str!("./bitfield_unit.rs");
+ let bitfield_unit_src = if ctx.options().rust_features().min_const_fn {
+@@ -3612,7 +3660,8 @@
+ } else {
+ Cow::Owned(bitfield_unit_src.replace("const fn ", "fn "))
+ };
+- let bitfield_unit_type = proc_macro2::TokenStream::from_str(&bitfield_unit_src).unwrap();
++ let bitfield_unit_type =
++ proc_macro2::TokenStream::from_str(&bitfield_unit_src).unwrap();
+ let bitfield_unit_type = quote!(#bitfield_unit_type);
+
+ let items = vec![bitfield_unit_type];
+@@ -3673,9 +3722,9 @@
+ // If the target supports `const fn`, declare eligible functions
+ // as `const fn` else just `fn`.
+ let const_fn = if ctx.options().rust_features().min_const_fn {
+- quote!{ const fn }
++ quote! { const fn }
+ } else {
+- quote!{ fn }
++ quote! { fn }
+ };
+
+ // TODO(emilio): The fmt::Debug impl could be way nicer with
+@@ -3757,15 +3806,17 @@
+ }
+ };
+
+- let items = vec![union_field_decl,
+- union_field_impl,
+- union_field_default_impl,
+- union_field_clone_impl,
+- union_field_copy_impl,
+- union_field_debug_impl,
+- union_field_hash_impl,
+- union_field_partialeq_impl,
+- union_field_eq_impl];
++ let items = vec![
++ union_field_decl,
++ union_field_impl,
++ union_field_default_impl,
++ union_field_clone_impl,
++ union_field_copy_impl,
++ union_field_debug_impl,
++ union_field_hash_impl,
++ union_field_partialeq_impl,
++ union_field_eq_impl,
++ ];
+
+ let old_items = mem::replace(result, items);
+ result.extend(old_items.into_iter());
+@@ -3780,9 +3831,9 @@
+ // If the target supports `const fn`, declare eligible functions
+ // as `const fn` else just `fn`.
+ let const_fn = if ctx.options().rust_features().min_const_fn {
+- quote!{ const fn }
++ quote! { const fn }
+ } else {
+- quote!{ fn }
++ quote! { fn }
+ };
+
+ let incomplete_array_decl = quote! {
+@@ -3839,18 +3890,18 @@
+ }
+ };
+
+- let items = vec![incomplete_array_decl,
+- incomplete_array_impl,
+- incomplete_array_debug_impl,
+- incomplete_array_clone_impl];
++ let items = vec![
++ incomplete_array_decl,
++ incomplete_array_impl,
++ incomplete_array_debug_impl,
++ incomplete_array_clone_impl,
++ ];
+
+ let old_items = mem::replace(result, items);
+ result.extend(old_items.into_iter());
+ }
+
+- pub fn prepend_complex_type(
+- result: &mut Vec<proc_macro2::TokenStream>,
+- ) {
++ pub fn prepend_complex_type(result: &mut Vec<proc_macro2::TokenStream>) {
+ let complex_type = quote! {
+ #[derive(PartialEq, Copy, Clone, Hash, Debug, Default)]
+ #[repr(C)]
+@@ -3870,12 +3921,16 @@
+ ctx: &BindgenContext,
+ ) -> error::Result<proc_macro2::TokenStream> {
+ let path = item.namespace_aware_canonical_path(ctx);
+- let tokens = proc_macro2::TokenStream::from_str(&path.join("::")).unwrap();
++ let tokens =
++ proc_macro2::TokenStream::from_str(&path.join("::")).unwrap();
+
+ Ok(tokens)
+ }
+
+- fn primitive_ty(ctx: &BindgenContext, name: &str) -> proc_macro2::TokenStream {
++ fn primitive_ty(
++ ctx: &BindgenContext,
++ name: &str,
++ ) -> proc_macro2::TokenStream {
+ let ident = ctx.rust_ident_raw(name);
+ quote! {
+ #ident
+@@ -3911,7 +3966,7 @@
+ ) -> proc_macro2::TokenStream {
+ let return_item = ctx.resolve_item(sig.return_type());
+ if let TypeKind::Void = *return_item.kind().expect_type().kind() {
+- quote! { }
++ quote! {}
+ } else {
+ let ret_ty = return_item.to_rust_ty_or_opaque(ctx, &());
+ quote! {
+@@ -3927,58 +3982,63 @@
+ use super::ToPtr;
+
+ let mut unnamed_arguments = 0;
+- let mut args = sig.argument_types().iter().map(|&(ref name, ty)| {
+- let arg_item = ctx.resolve_item(ty);
+- let arg_ty = arg_item.kind().expect_type();
+-
+- // From the C90 standard[1]:
+- //
+- // A declaration of a parameter as "array of type" shall be
+- // adjusted to "qualified pointer to type", where the type
+- // qualifiers (if any) are those specified within the [ and ] of
+- // the array type derivation.
+- //
+- // [1]: http://c0x.coding-guidelines.com/6.7.5.3.html
+- let arg_ty = match *arg_ty.canonical_type(ctx).kind() {
+- TypeKind::Array(t, _) => {
+- let stream = if ctx.options().array_pointers_in_arguments {
+- arg_ty.to_rust_ty_or_opaque(ctx, &arg_item)
+- } else {
+- t.to_rust_ty_or_opaque(ctx, &())
+- };
+- stream.to_ptr(ctx.resolve_type(t).is_const())
++ let mut args = sig
++ .argument_types()
++ .iter()
++ .map(|&(ref name, ty)| {
++ let arg_item = ctx.resolve_item(ty);
++ let arg_ty = arg_item.kind().expect_type();
++
++ // From the C90 standard[1]:
++ //
++ // A declaration of a parameter as "array of type" shall be
++ // adjusted to "qualified pointer to type", where the type
++ // qualifiers (if any) are those specified within the [ and ] of
++ // the array type derivation.
++ //
++ // [1]: http://c0x.coding-guidelines.com/6.7.5.3.html
++ let arg_ty = match *arg_ty.canonical_type(ctx).kind() {
++ TypeKind::Array(t, _) => {
++ let stream =
++ if ctx.options().array_pointers_in_arguments {
++ arg_ty.to_rust_ty_or_opaque(ctx, &arg_item)
++ } else {
++ t.to_rust_ty_or_opaque(ctx, &())
++ };
++ stream.to_ptr(ctx.resolve_type(t).is_const())
++ }
++ TypeKind::Pointer(inner) => {
++ let inner = ctx.resolve_item(inner);
++ let inner_ty = inner.expect_type();
++ if let TypeKind::ObjCInterface(_) =
++ *inner_ty.canonical_type(ctx).kind()
++ {
++ quote! {
++ id
++ }
++ } else {
++ arg_item.to_rust_ty_or_opaque(ctx, &())
++ }
++ }
++ _ => arg_item.to_rust_ty_or_opaque(ctx, &()),
++ };
++
++ let arg_name = match *name {
++ Some(ref name) => ctx.rust_mangle(name).into_owned(),
++ None => {
++ unnamed_arguments += 1;
++ format!("arg{}", unnamed_arguments)
++ }
++ };
++
++ assert!(!arg_name.is_empty());
++ let arg_name = ctx.rust_ident(arg_name);
++
++ quote! {
++ #arg_name : #arg_ty
+ }
+- TypeKind::Pointer(inner) => {
+- let inner = ctx.resolve_item(inner);
+- let inner_ty = inner.expect_type();
+- if let TypeKind::ObjCInterface(_) = *inner_ty.canonical_type(ctx).kind() {
+- quote! {
+- id
+- }
+- } else {
+- arg_item.to_rust_ty_or_opaque(ctx, &())
+- }
+- },
+- _ => {
+- arg_item.to_rust_ty_or_opaque(ctx, &())
+- }
+- };
+-
+- let arg_name = match *name {
+- Some(ref name) => ctx.rust_mangle(name).into_owned(),
+- None => {
+- unnamed_arguments += 1;
+- format!("arg{}", unnamed_arguments)
+- }
+- };
+-
+- assert!(!arg_name.is_empty());
+- let arg_name = ctx.rust_ident(arg_name);
+-
+- quote! {
+- #arg_name : #arg_ty
+- }
+- }).collect::<Vec<_>>();
++ })
++ .collect::<Vec<_>>();
+
+ if sig.is_variadic() {
+ args.push(quote! { ... })
+@@ -3998,11 +4058,12 @@
+ });
+
+ let return_item = ctx.resolve_item(sig.return_type());
+- let ret_ty = if let TypeKind::Void = *return_item.kind().expect_type().kind() {
+- quote! { () }
+- } else {
+- return_item.to_rust_ty_or_opaque(ctx, &())
+- };
++ let ret_ty =
++ if let TypeKind::Void = *return_item.kind().expect_type().kind() {
++ quote! { () }
++ } else {
++ return_item.to_rust_ty_or_opaque(ctx, &())
++ };
+
+ quote! {
+ *const ::block::Block<(#(#args,)*), #ret_ty>
+diff --git a/third_party/rust/bindgen/src/codegen/struct_layout.rs b/third_party/rust/bindgen/src/codegen/struct_layout.rs
+--- a/third_party/rust/bindgen/src/codegen/struct_layout.rs
++++ b/third_party/rust/bindgen/src/codegen/struct_layout.rs
+@@ -166,7 +166,8 @@
+ // This means that the structs in the array are super-unsafe to
+ // access, since they won't be properly aligned, but there's not too
+ // much we can do about it.
+- if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx) {
++ if let Some(layout) = self.ctx.resolve_type(inner).layout(self.ctx)
++ {
+ if layout.align > self.ctx.target_pointer_size() {
+ field_layout.size =
+ align_to(layout.size, layout.align) * len;
+@@ -212,7 +213,10 @@
+ if need_padding && padding_bytes != 0 {
+ Some(Layout::new(
+ padding_bytes,
+- cmp::min(field_layout.align, self.ctx.target_pointer_size())
++ cmp::min(
++ field_layout.align,
++ self.ctx.target_pointer_size(),
++ ),
+ ))
+ } else {
+ None
+@@ -235,11 +239,13 @@
+ padding_layout.map(|layout| self.padding_field(layout))
+ }
+
+- pub fn pad_struct(&mut self, layout: Layout) -> Option<proc_macro2::TokenStream> {
++ pub fn pad_struct(
++ &mut self,
++ layout: Layout,
++ ) -> Option<proc_macro2::TokenStream> {
+ debug!(
+ "pad_struct:\n\tself = {:#?}\n\tlayout = {:#?}",
+- self,
+- layout
++ self, layout
+ );
+
+ if layout.size < self.latest_offset {
+@@ -261,15 +267,15 @@
+ // other fields.
+ if padding_bytes > 0 &&
+ (padding_bytes >= layout.align ||
+- (self.last_field_was_bitfield &&
+- padding_bytes >=
+- self.latest_field_layout.unwrap().align) ||
+- layout.align > self.ctx.target_pointer_size())
++ (self.last_field_was_bitfield &&
++ padding_bytes >=
++ self.latest_field_layout.unwrap().align) ||
++ layout.align > self.ctx.target_pointer_size())
+ {
+ let layout = if self.is_packed {
+ Layout::new(padding_bytes, 1)
+ } else if self.last_field_was_bitfield ||
+- layout.align > self.ctx.target_pointer_size()
++ layout.align > self.ctx.target_pointer_size()
+ {
+ // We've already given up on alignment here.
+ Layout::for_size(self.ctx, padding_bytes)
+@@ -315,7 +321,10 @@
+
+ self.padding_count += 1;
+
+- let padding_field_name = Ident::new(&format!("__bindgen_padding_{}", padding_count), Span::call_site());
++ let padding_field_name = Ident::new(
++ &format!("__bindgen_padding_{}", padding_count),
++ Span::call_site(),
++ );
+
+ self.max_field_align = cmp::max(self.max_field_align, layout.align);
+
+@@ -342,9 +351,7 @@
+ // current field alignment and the bitfield size and alignment are.
+ debug!(
+ "align_to_bitfield? {}: {:?} {:?}",
+- self.last_field_was_bitfield,
+- layout,
+- new_field_layout
++ self.last_field_was_bitfield, layout, new_field_layout
+ );
+
+ // Avoid divide-by-zero errors if align is 0.
+diff --git a/third_party/rust/bindgen/src/features.rs b/third_party/rust/bindgen/src/features.rs
+--- a/third_party/rust/bindgen/src/features.rs
++++ b/third_party/rust/bindgen/src/features.rs
+@@ -227,27 +227,27 @@
+ fn target_features() {
+ let f_1_0 = RustFeatures::from(RustTarget::Stable_1_0);
+ assert!(
+- !f_1_0.untagged_union
+- && !f_1_0.associated_const
+- && !f_1_0.builtin_clone_impls
+- && !f_1_0.repr_align
+- && !f_1_0.thiscall_abi
++ !f_1_0.untagged_union &&
++ !f_1_0.associated_const &&
++ !f_1_0.builtin_clone_impls &&
++ !f_1_0.repr_align &&
++ !f_1_0.thiscall_abi
+ );
+ let f_1_21 = RustFeatures::from(RustTarget::Stable_1_21);
+ assert!(
+- f_1_21.untagged_union
+- && f_1_21.associated_const
+- && f_1_21.builtin_clone_impls
+- && !f_1_21.repr_align
+- && !f_1_21.thiscall_abi
++ f_1_21.untagged_union &&
++ f_1_21.associated_const &&
++ f_1_21.builtin_clone_impls &&
++ !f_1_21.repr_align &&
++ !f_1_21.thiscall_abi
+ );
+ let f_nightly = RustFeatures::from(RustTarget::Nightly);
+ assert!(
+- f_nightly.untagged_union
+- && f_nightly.associated_const
+- && f_nightly.builtin_clone_impls
+- && f_nightly.repr_align
+- && f_nightly.thiscall_abi
++ f_nightly.untagged_union &&
++ f_nightly.associated_const &&
++ f_nightly.builtin_clone_impls &&
++ f_nightly.repr_align &&
++ f_nightly.thiscall_abi
+ );
+ }
+
+diff --git a/third_party/rust/bindgen/src/ir/analysis/derive.rs b/third_party/rust/bindgen/src/ir/analysis/derive.rs
+--- a/third_party/rust/bindgen/src/ir/analysis/derive.rs
++++ b/third_party/rust/bindgen/src/ir/analysis/derive.rs
+@@ -2,18 +2,18 @@
+
+ use std::fmt;
+
+-use super::{ConstrainResult, MonotoneFramework, generate_dependencies};
++use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
+ use ir::analysis::has_vtable::HasVtable;
+ use ir::comp::CompKind;
+ use ir::context::{BindgenContext, ItemId};
+ use ir::derive::CanDerive;
+ use ir::function::FunctionSig;
+-use ir::item::{Item, IsOpaque};
++use ir::item::{IsOpaque, Item};
+ use ir::template::TemplateParameters;
+ use ir::traversal::{EdgeKind, Trace};
+ use ir::ty::RUST_DERIVE_IN_ARRAY_LIMIT;
+-use ir::ty::{TypeKind, Type};
+-use {HashSet, HashMap, Entry};
++use ir::ty::{Type, TypeKind};
++use {Entry, HashMap, HashSet};
+
+ /// Which trait to consider when doing the `CannotDerive` analysis.
+ #[derive(Debug, Copy, Clone)]
+@@ -110,19 +110,26 @@
+ can_derive: CanDerive,
+ ) -> ConstrainResult {
+ let id = id.into();
+- trace!("inserting {:?} can_derive<{}>={:?}", id, self.derive_trait, can_derive);
++ trace!(
++ "inserting {:?} can_derive<{}>={:?}",
++ id,
++ self.derive_trait,
++ can_derive
++ );
+
+ if let CanDerive::Yes = can_derive {
+ return ConstrainResult::Same;
+ }
+
+ match self.can_derive.entry(id) {
+- Entry::Occupied(mut entry) => if *entry.get() < can_derive {
+- entry.insert(can_derive);
+- ConstrainResult::Changed
+- } else {
+- ConstrainResult::Same
+- },
++ Entry::Occupied(mut entry) => {
++ if *entry.get() < can_derive {
++ entry.insert(can_derive);
++ ConstrainResult::Changed
++ } else {
++ ConstrainResult::Same
++ }
++ }
+ Entry::Vacant(entry) => {
+ entry.insert(can_derive);
+ ConstrainResult::Changed
+@@ -132,41 +139,50 @@
+
+ fn constrain_type(&mut self, item: &Item, ty: &Type) -> CanDerive {
+ if !self.ctx.whitelisted_items().contains(&item.id()) {
+- trace!(" cannot derive {} for blacklisted type", self.derive_trait);
++ trace!(
++ " cannot derive {} for blacklisted type",
++ self.derive_trait
++ );
+ return CanDerive::No;
+ }
+
+ if self.derive_trait.not_by_name(self.ctx, &item) {
+- trace!(" cannot derive {} for explicitly excluded type", self.derive_trait);
++ trace!(
++ " cannot derive {} for explicitly excluded type",
++ self.derive_trait
++ );
+ return CanDerive::No;
+ }
+
+ trace!("ty: {:?}", ty);
+ if item.is_opaque(self.ctx, &()) {
+- if !self.derive_trait.can_derive_union()
+- && ty.is_union()
+- && self.ctx.options().rust_features().untagged_union
++ if !self.derive_trait.can_derive_union() &&
++ ty.is_union() &&
++ self.ctx.options().rust_features().untagged_union
+ {
+ trace!(
+- " cannot derive {} for Rust unions", self.derive_trait
++ " cannot derive {} for Rust unions",
++ self.derive_trait
+ );
+ return CanDerive::No;
+ }
+
+- let layout_can_derive = ty.layout(self.ctx)
+- .map_or(CanDerive::Yes, |l| {
++ let layout_can_derive =
++ ty.layout(self.ctx).map_or(CanDerive::Yes, |l| {
+ l.opaque().array_size_within_derive_limit(self.ctx)
+ });
+
+ match layout_can_derive {
+ CanDerive::Yes => {
+ trace!(
+- " we can trivially derive {} for the layout", self.derive_trait
++ " we can trivially derive {} for the layout",
++ self.derive_trait
+ );
+ }
+ _ => {
+ trace!(
+- " we cannot derive {} for the layout", self.derive_trait
++ " we cannot derive {} for the layout",
++ self.derive_trait
+ );
+ }
+ };
+@@ -194,7 +210,7 @@
+ let inner_type =
+ self.ctx.resolve_type(inner).canonical_type(self.ctx);
+ if let TypeKind::Function(ref sig) = *inner_type.kind() {
+- return self.derive_trait.can_derive_fnptr(sig)
++ return self.derive_trait.can_derive_fnptr(sig);
+ } else {
+ return self.derive_trait.can_derive_pointer();
+ }
+@@ -205,31 +221,34 @@
+
+ // Complex cases need more information
+ TypeKind::Array(t, len) => {
+- let inner_type = self.can_derive
+- .get(&t.into())
+- .cloned()
+- .unwrap_or_default();
++ let inner_type =
++ self.can_derive.get(&t.into()).cloned().unwrap_or_default();
+ if inner_type != CanDerive::Yes {
+ trace!(
+ " arrays of T for which we cannot derive {} \
+- also cannot derive {}", self.derive_trait, self.derive_trait
++ also cannot derive {}",
++ self.derive_trait,
++ self.derive_trait
+ );
+ return CanDerive::No;
+ }
+
+- if len == 0 && !self.derive_trait.can_derive_incomplete_array() {
++ if len == 0 && !self.derive_trait.can_derive_incomplete_array()
++ {
+ trace!(
+- " cannot derive {} for incomplete arrays", self.derive_trait
++ " cannot derive {} for incomplete arrays",
++ self.derive_trait
+ );
+ return CanDerive::No;
+ } else {
+ if self.derive_trait.can_derive_large_array() {
+- trace!(" array can derive {}", self.derive_trait);
+- return CanDerive::Yes;
++ trace!(" array can derive {}", self.derive_trait);
++ return CanDerive::Yes;
+ } else {
+ if len <= RUST_DERIVE_IN_ARRAY_LIMIT {
+ trace!(
+- " array is small enough to derive {}", self.derive_trait
++ " array is small enough to derive {}",
++ self.derive_trait
+ );
+ return CanDerive::Yes;
+ } else {
+@@ -242,19 +261,19 @@
+ }
+ }
+ TypeKind::Vector(t, len) => {
+- let inner_type = self.can_derive
+- .get(&t.into())
+- .cloned()
+- .unwrap_or_default();
++ let inner_type =
++ self.can_derive.get(&t.into()).cloned().unwrap_or_default();
+ if inner_type != CanDerive::Yes {
+ trace!(
+ " vectors of T for which we cannot derive {} \
+- also cannot derive {}", self.derive_trait, self.derive_trait
++ also cannot derive {}",
++ self.derive_trait,
++ self.derive_trait
+ );
+ return CanDerive::No;
+ }
+ assert_ne!(len, 0, "vectors cannot have zero length");
+- return self.derive_trait.can_derive_vector()
++ return self.derive_trait.can_derive_vector();
+ }
+
+ TypeKind::Comp(ref info) => {
+@@ -263,19 +282,28 @@
+ "The early ty.is_opaque check should have handled this case"
+ );
+
+- if !self.derive_trait.can_derive_compound_forward_decl()
+- && info.is_forward_declaration() {
+- trace!(" cannot derive {} for forward decls", self.derive_trait);
++ if !self.derive_trait.can_derive_compound_forward_decl() &&
++ info.is_forward_declaration()
++ {
++ trace!(
++ " cannot derive {} for forward decls",
++ self.derive_trait
++ );
+ return CanDerive::No;
+ }
+
+ // NOTE: Take into account that while unions in C and C++ are copied by
+ // default, the may have an explicit destructor in C++, so we can't
+ // defer this check just for the union case.
+- if !self.derive_trait.can_derive_compound_with_destructor()
+- && self.ctx.lookup_has_destructor(item.id().expect_type_id(self.ctx))
++ if !self.derive_trait.can_derive_compound_with_destructor() &&
++ self.ctx.lookup_has_destructor(
++ item.id().expect_type_id(self.ctx),
++ )
+ {
+- trace!(" comp has destructor which cannot derive {}", self.derive_trait);
++ trace!(
++ " comp has destructor which cannot derive {}",
++ self.derive_trait
++ );
+ return CanDerive::No;
+ }
+
+@@ -284,34 +312,39 @@
+ if self.ctx.options().rust_features().untagged_union &&
+ // https://github.com/rust-lang/rust/issues/36640
+ (!info.self_template_params(self.ctx).is_empty() ||
+- !item.all_template_params(self.ctx).is_empty()) {
++ !item.all_template_params(self.ctx).is_empty())
++ {
+ trace!(
+ " cannot derive {} for Rust union because issue 36640", self.derive_trait
+ );
+ return CanDerive::No;
+ }
+- // fall through to be same as non-union handling
++ // fall through to be same as non-union handling
+ } else {
+ if self.ctx.options().rust_features().untagged_union {
+ trace!(
+- " cannot derive {} for Rust unions", self.derive_trait
++ " cannot derive {} for Rust unions",
++ self.derive_trait
+ );
+ return CanDerive::No;
+ }
+
+ let layout_can_derive =
+ ty.layout(self.ctx).map_or(CanDerive::Yes, |l| {
+- l.opaque().array_size_within_derive_limit(self.ctx)
++ l.opaque()
++ .array_size_within_derive_limit(self.ctx)
+ });
+ match layout_can_derive {
+ CanDerive::Yes => {
+ trace!(
+- " union layout can trivially derive {}", self.derive_trait
++ " union layout can trivially derive {}",
++ self.derive_trait
+ );
+ }
+ _ => {
+ trace!(
+- " union layout cannot derive {}", self.derive_trait
++ " union layout cannot derive {}",
++ self.derive_trait
+ );
+ }
+ };
+@@ -319,9 +352,13 @@
+ }
+ }
+
+- if !self.derive_trait.can_derive_compound_with_vtable()
+- && item.has_vtable(self.ctx) {
+- trace!(" cannot derive {} for comp with vtable", self.derive_trait);
++ if !self.derive_trait.can_derive_compound_with_vtable() &&
++ item.has_vtable(self.ctx)
++ {
++ trace!(
++ " cannot derive {} for comp with vtable",
++ self.derive_trait
++ );
+ return CanDerive::No;
+ }
+
+@@ -348,7 +385,11 @@
+ }
+ }
+
+- fn constrain_join(&mut self, item: &Item, consider_edge: EdgePredicate) -> CanDerive {
++ fn constrain_join(
++ &mut self,
++ item: &Item,
++ consider_edge: EdgePredicate,
++ ) -> CanDerive {
+ let mut candidate = None;
+
+ item.trace(
+@@ -378,7 +419,10 @@
+ );
+
+ if candidate.is_none() {
+- trace!(" can derive {} because there are no members", self.derive_trait);
++ trace!(
++ " can derive {} because there are no members",
++ self.derive_trait
++ );
+ }
+ candidate.unwrap_or_default()
+ }
+@@ -389,8 +433,10 @@
+ match self {
+ DeriveTrait::Copy => ctx.no_copy_by_name(item),
+ DeriveTrait::Hash => ctx.no_hash_by_name(item),
+- DeriveTrait::PartialEqOrPartialOrd => ctx.no_partialeq_by_name(item),
+- _ => false
++ DeriveTrait::PartialEqOrPartialOrd => {
++ ctx.no_partialeq_by_name(item)
++ }
++ _ => false,
+ }
+ }
+
+@@ -398,17 +444,16 @@
+ match self {
+ DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
+ _ => |kind| match kind {
+- EdgeKind::BaseMember |
+- EdgeKind::Field => true,
++ EdgeKind::BaseMember | EdgeKind::Field => true,
+ _ => false,
+- }
++ },
+ }
+ }
+
+ fn consider_edge_typeref(&self) -> EdgePredicate {
+ match self {
+ DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
+- _ => |kind| kind == EdgeKind::TypeReference
++ _ => |kind| kind == EdgeKind::TypeReference,
+ }
+ }
+
+@@ -416,10 +461,11 @@
+ match self {
+ DeriveTrait::PartialEqOrPartialOrd => consider_edge_default,
+ _ => |kind| match kind {
+- EdgeKind::TemplateArgument |
+- EdgeKind::TemplateDeclaration => true,
++ EdgeKind::TemplateArgument | EdgeKind::TemplateDeclaration => {
++ true
++ }
+ _ => false,
+- }
++ },
+ }
+ }
+
+@@ -460,16 +506,16 @@
+
+ fn can_derive_incomplete_array(&self) -> bool {
+ match self {
+- DeriveTrait::Copy | DeriveTrait::Hash | DeriveTrait::PartialEqOrPartialOrd => false,
++ DeriveTrait::Copy |
++ DeriveTrait::Hash |
++ DeriveTrait::PartialEqOrPartialOrd => false,
+ _ => true,
+ }
+ }
+
+ fn can_derive_fnptr(&self, f: &FunctionSig) -> CanDerive {
+ match (self, f.function_pointers_can_derive()) {
+- (DeriveTrait::Copy, _) |
+- (DeriveTrait::Default, _) |
+- (_, true) => {
++ (DeriveTrait::Copy, _) | (DeriveTrait::Default, _) | (_, true) => {
+ trace!(" function pointer can derive {}", self);
+ CanDerive::Yes
+ }
+@@ -526,21 +572,23 @@
+ (DeriveTrait::Default, TypeKind::ObjCSel) => {
+ trace!(" types that always cannot derive Default");
+ CanDerive::No
+- },
+- (DeriveTrait::Default, TypeKind::UnresolvedTypeRef(..)) => unreachable!(
+- "Type with unresolved type ref can't reach derive default"
+- ),
++ }
++ (DeriveTrait::Default, TypeKind::UnresolvedTypeRef(..)) => {
++ unreachable!(
++ "Type with unresolved type ref can't reach derive default"
++ )
++ }
+ // === Hash ===
+ (DeriveTrait::Hash, TypeKind::Float(..)) |
+ (DeriveTrait::Hash, TypeKind::Complex(..)) => {
+ trace!(" float cannot derive Hash");
+ CanDerive::No
+- },
++ }
+ // === others ===
+ _ => {
+ trace!(" simple type that can always derive {}", self);
+ CanDerive::Yes
+- },
++ }
+ }
+ }
+ }
+@@ -602,9 +650,7 @@
+ trace!("constrain: {:?}", id);
+
+ if let Some(CanDerive::No) = self.can_derive.get(&id).cloned() {
+- trace!(
+- " already know it cannot derive {}", self.derive_trait
+- );
++ trace!(" already know it cannot derive {}", self.derive_trait);
+ return ConstrainResult::Same;
+ }
+
+@@ -614,7 +660,9 @@
+ let mut can_derive = self.constrain_type(item, ty);
+ if let CanDerive::Yes = can_derive {
+ if !self.derive_trait.can_derive_large_array() &&
+- ty.layout(self.ctx).map_or(false, |l| l.align > RUST_DERIVE_IN_ARRAY_LIMIT)
++ ty.layout(self.ctx).map_or(false, |l| {
++ l.align > RUST_DERIVE_IN_ARRAY_LIMIT
++ })
+ {
+ // We have to be conservative: the struct *could* have enough
+ // padding that we emit an array that is longer than
+@@ -647,12 +695,10 @@
+
+ impl<'ctx> From<CannotDerive<'ctx>> for HashMap<ItemId, CanDerive> {
+ fn from(analysis: CannotDerive<'ctx>) -> Self {
+- extra_assert!(
+- analysis
+- .can_derive
+- .values()
+- .all(|v| *v != CanDerive::Yes)
+- );
++ extra_assert!(analysis
++ .can_derive
++ .values()
++ .all(|v| *v != CanDerive::Yes));
+
+ analysis.can_derive
+ }
+@@ -662,9 +708,11 @@
+ ///
+ /// Elements that are not `CanDerive::Yes` are kept in the set, so that it
+ /// represents all items that cannot derive.
+-pub fn as_cannot_derive_set(can_derive: HashMap<ItemId, CanDerive>) -> HashSet<ItemId> {
++pub fn as_cannot_derive_set(
++ can_derive: HashMap<ItemId, CanDerive>,
++) -> HashSet<ItemId> {
+ can_derive
+ .into_iter()
+- .filter_map(|(k, v)| if v != CanDerive::Yes { Some(k) } else { None } )
++ .filter_map(|(k, v)| if v != CanDerive::Yes { Some(k) } else { None })
+ .collect()
+ }
+diff --git a/third_party/rust/bindgen/src/ir/analysis/has_destructor.rs b/third_party/rust/bindgen/src/ir/analysis/has_destructor.rs
+--- a/third_party/rust/bindgen/src/ir/analysis/has_destructor.rs
++++ b/third_party/rust/bindgen/src/ir/analysis/has_destructor.rs
+@@ -1,9 +1,9 @@
+ //! Determining which types have destructors
+
+-use super::{ConstrainResult, MonotoneFramework, generate_dependencies};
++use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
++use ir::comp::{CompKind, Field, FieldMethods};
+ use ir::context::{BindgenContext, ItemId};
+ use ir::traversal::EdgeKind;
+-use ir::comp::{CompKind, Field, FieldMethods};
+ use ir::ty::TypeKind;
+ use {HashMap, HashSet};
+
+@@ -121,14 +121,14 @@
+ let base_or_field_destructor =
+ info.base_members().iter().any(|base| {
+ self.have_destructor.contains(&base.ty.into())
+- }) ||
+- info.fields().iter().any(|field| {
+- match *field {
+- Field::DataMember(ref data) =>
+- self.have_destructor.contains(&data.ty().into()),
+- Field::Bitfields(_) => false
+- }
+- });
++ }) || info.fields().iter().any(
++ |field| match *field {
++ Field::DataMember(ref data) => self
++ .have_destructor
++ .contains(&data.ty().into()),
++ Field::Bitfields(_) => false,
++ },
++ );
+ if base_or_field_destructor {
+ self.insert(id)
+ } else {
+@@ -139,9 +139,9 @@
+ }
+
+ TypeKind::TemplateInstantiation(ref inst) => {
+- let definition_or_arg_destructor =
+- self.have_destructor.contains(&inst.template_definition().into())
+- ||
++ let definition_or_arg_destructor = self
++ .have_destructor
++ .contains(&inst.template_definition().into()) ||
+ inst.template_arguments().iter().any(|arg| {
+ self.have_destructor.contains(&arg.into())
+ });
+diff --git a/third_party/rust/bindgen/src/ir/analysis/has_float.rs b/third_party/rust/bindgen/src/ir/analysis/has_float.rs
+--- a/third_party/rust/bindgen/src/ir/analysis/has_float.rs
++++ b/third_party/rust/bindgen/src/ir/analysis/has_float.rs
+@@ -1,12 +1,12 @@
+ //! Determining which types has float.
+
+-use super::{ConstrainResult, MonotoneFramework, generate_dependencies};
+-use {HashSet, HashMap};
++use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
++use ir::comp::Field;
++use ir::comp::FieldMethods;
+ use ir::context::{BindgenContext, ItemId};
+ use ir::traversal::EdgeKind;
+ use ir::ty::TypeKind;
+-use ir::comp::Field;
+-use ir::comp::FieldMethods;
++use {HashMap, HashSet};
+
+ /// An analysis that finds for each IR item whether it has float or not.
+ ///
+@@ -132,24 +132,27 @@
+ ConstrainResult::Same
+ }
+
+- TypeKind::Float(..) |
+- TypeKind::Complex(..) => {
++ TypeKind::Float(..) | TypeKind::Complex(..) => {
+ trace!(" float type has float");
+ self.insert(id)
+ }
+
+ TypeKind::Array(t, _) => {
+ if self.has_float.contains(&t.into()) {
+- trace!(" Array with type T that has float also has float");
+- return self.insert(id)
++ trace!(
++ " Array with type T that has float also has float"
++ );
++ return self.insert(id);
+ }
+ trace!(" Array with type T that do not have float also do not have float");
+ ConstrainResult::Same
+ }
+ TypeKind::Vector(t, _) => {
+ if self.has_float.contains(&t.into()) {
+- trace!(" Vector with type T that has float also has float");
+- return self.insert(id)
++ trace!(
++ " Vector with type T that has float also has float"
++ );
++ return self.insert(id);
+ }
+ trace!(" Vector with type T that do not have float also do not have float");
+ ConstrainResult::Same
+@@ -160,8 +163,10 @@
+ TypeKind::Alias(t) |
+ TypeKind::BlockPointer(t) => {
+ if self.has_float.contains(&t.into()) {
+- trace!(" aliases and type refs to T which have float \
+- also have float");
++ trace!(
++ " aliases and type refs to T which have float \
++ also have float"
++ );
+ self.insert(id)
+ } else {
+ trace!(" aliases and type refs to T which do not have float \
+@@ -171,28 +176,23 @@
+ }
+
+ TypeKind::Comp(ref info) => {
+- let bases_have = info.base_members()
++ let bases_have = info
++ .base_members()
+ .iter()
+ .any(|base| self.has_float.contains(&base.ty.into()));
+ if bases_have {
+ trace!(" bases have float, so we also have");
+ return self.insert(id);
+ }
+- let fields_have = info.fields()
+- .iter()
+- .any(|f| {
+- match *f {
+- Field::DataMember(ref data) => {
+- self.has_float.contains(&data.ty().into())
+- }
+- Field::Bitfields(ref bfu) => {
+- bfu.bitfields()
+- .iter().any(|b| {
+- self.has_float.contains(&b.ty().into())
+- })
+- },
+- }
+- });
++ let fields_have = info.fields().iter().any(|f| match *f {
++ Field::DataMember(ref data) => {
++ self.has_float.contains(&data.ty().into())
++ }
++ Field::Bitfields(ref bfu) => bfu
++ .bitfields()
++ .iter()
++ .any(|b| self.has_float.contains(&b.ty().into())),
++ });
+ if fields_have {
+ trace!(" fields have float, so we also have");
+ return self.insert(id);
+@@ -203,20 +203,26 @@
+ }
+
+ TypeKind::TemplateInstantiation(ref template) => {
+- let args_have = template.template_arguments()
++ let args_have = template
++ .template_arguments()
+ .iter()
+ .any(|arg| self.has_float.contains(&arg.into()));
+ if args_have {
+- trace!(" template args have float, so \
+- insantiation also has float");
++ trace!(
++ " template args have float, so \
++ insantiation also has float"
++ );
+ return self.insert(id);
+ }
+
+- let def_has = self.has_float
++ let def_has = self
++ .has_float
+ .contains(&template.template_definition().into());
+ if def_has {
+- trace!(" template definition has float, so \
+- insantiation also has");
++ trace!(
++ " template definition has float, so \
++ insantiation also has"
++ );
+ return self.insert(id);
+ }
+
+@@ -227,7 +233,8 @@
+ }
+
+ fn each_depending_on<F>(&self, id: ItemId, mut f: F)
+- where F: FnMut(ItemId),
++ where
++ F: FnMut(ItemId),
+ {
+ if let Some(edges) = self.dependencies.get(&id) {
+ for item in edges {
+diff --git a/third_party/rust/bindgen/src/ir/analysis/has_type_param_in_array.rs b/third_party/rust/bindgen/src/ir/analysis/has_type_param_in_array.rs
+--- a/third_party/rust/bindgen/src/ir/analysis/has_type_param_in_array.rs
++++ b/third_party/rust/bindgen/src/ir/analysis/has_type_param_in_array.rs
+@@ -1,6 +1,6 @@
+ //! Determining which types has typed parameters in array.
+
+-use super::{ConstrainResult, MonotoneFramework, generate_dependencies};
++use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
+ use ir::comp::Field;
+ use ir::comp::FieldMethods;
+ use ir::context::{BindgenContext, ItemId};
+@@ -88,9 +88,7 @@
+ type Extra = &'ctx BindgenContext;
+ type Output = HashSet<ItemId>;
+
+- fn new(
+- ctx: &'ctx BindgenContext,
+- ) -> HasTypeParameterInArray<'ctx> {
++ fn new(ctx: &'ctx BindgenContext) -> HasTypeParameterInArray<'ctx> {
+ let has_type_parameter_in_array = HashSet::default();
+ let dependencies = generate_dependencies(ctx, Self::consider_edge);
+
+@@ -169,7 +167,7 @@
+ if self.has_type_parameter_in_array.contains(&t.into()) {
+ trace!(
+ " aliases and type refs to T which have array \
+- also have array"
++ also have array"
+ );
+ self.insert(id)
+ } else {
+@@ -190,9 +188,9 @@
+ return self.insert(id);
+ }
+ let fields_have = info.fields().iter().any(|f| match *f {
+- Field::DataMember(ref data) => {
+- self.has_type_parameter_in_array.contains(&data.ty().into())
+- }
++ Field::DataMember(ref data) => self
++ .has_type_parameter_in_array
++ .contains(&data.ty().into()),
+ Field::Bitfields(..) => false,
+ });
+ if fields_have {
+@@ -212,18 +210,18 @@
+ if args_have {
+ trace!(
+ " template args have array, so \
+- insantiation also has array"
++ insantiation also has array"
+ );
+ return self.insert(id);
+ }
+
+- let def_has = self.has_type_parameter_in_array.contains(
+- &template.template_definition().into(),
+- );
++ let def_has = self
++ .has_type_parameter_in_array
++ .contains(&template.template_definition().into());
+ if def_has {
+ trace!(
+ " template definition has array, so \
+- insantiation also has"
++ insantiation also has"
+ );
+ return self.insert(id);
+ }
+diff --git a/third_party/rust/bindgen/src/ir/analysis/has_vtable.rs b/third_party/rust/bindgen/src/ir/analysis/has_vtable.rs
+--- a/third_party/rust/bindgen/src/ir/analysis/has_vtable.rs
++++ b/third_party/rust/bindgen/src/ir/analysis/has_vtable.rs
+@@ -1,25 +1,25 @@
+ //! Determining which types has vtable
+
+-use super::{ConstrainResult, MonotoneFramework, generate_dependencies};
++use super::{generate_dependencies, ConstrainResult, MonotoneFramework};
+ use ir::context::{BindgenContext, ItemId};
+ use ir::traversal::EdgeKind;
+ use ir::ty::TypeKind;
+ use std::cmp;
+ use std::ops;
+-use {HashMap, Entry};
++use {Entry, HashMap};
+
+ /// The result of the `HasVtableAnalysis` for an individual item.
+-#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord)]
++#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+ pub enum HasVtableResult {
+- /// The item has a vtable, but the actual vtable pointer is in a base
+- /// member.
+- BaseHasVtable,
++ /// The item does not have a vtable pointer.
++ No,
+
+ /// The item has a vtable and the actual vtable pointer is within this item.
+ SelfHasVtable,
+
+- /// The item does not have a vtable pointer.
+- No
++ /// The item has a vtable, but the actual vtable pointer is in a base
++ /// member.
++ BaseHasVtable,
+ }
+
+ impl Default for HasVtableResult {
+@@ -28,21 +28,6 @@
+ }
+ }
+
+-impl cmp::PartialOrd for HasVtableResult {
+- fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> {
+- use self::HasVtableResult::*;
+-
+- match (*self, *rhs) {
+- (x, y) if x == y => Some(cmp::Ordering::Equal),
+- (BaseHasVtable, _) => Some(cmp::Ordering::Greater),
+- (_, BaseHasVtable) => Some(cmp::Ordering::Less),
+- (SelfHasVtable, _) => Some(cmp::Ordering::Greater),
+- (_, SelfHasVtable) => Some(cmp::Ordering::Less),
+- _ => unreachable!(),
+- }
+- }
+-}
+-
+ impl HasVtableResult {
+ /// Take the least upper bound of `self` and `rhs`.
+ pub fn join(self, rhs: Self) -> Self {
+@@ -104,7 +89,11 @@
+ }
+ }
+
+- fn insert<Id: Into<ItemId>>(&mut self, id: Id, result: HasVtableResult) -> ConstrainResult {
++ fn insert<Id: Into<ItemId>>(
++ &mut self,
++ id: Id,
++ result: HasVtableResult,
++ ) -> ConstrainResult {
+ if let HasVtableResult::No = result {
+ return ConstrainResult::Same;
+ }
+@@ -176,7 +165,9 @@
+ TypeKind::Alias(t) |
+ TypeKind::ResolvedTypeRef(t) |
+ TypeKind::Reference(t) => {
+- trace!(" aliases and references forward to their inner type");
++ trace!(
++ " aliases and references forward to their inner type"
++ );
+ self.forward(t, id)
+ }
+
+@@ -224,9 +215,10 @@
+ impl<'ctx> From<HasVtableAnalysis<'ctx>> for HashMap<ItemId, HasVtableResult> {
+ fn from(analysis: HasVtableAnalysis<'ctx>) -> Self {
+ // We let the lack of an entry mean "No" to save space.
+- extra_assert!(analysis.have_vtable.values().all(|v| {
+- *v != HasVtableResult::No
+- }));
++ extra_assert!(analysis
++ .have_vtable
++ .values()
++ .all(|v| { *v != HasVtableResult::No }));
+
+ analysis.have_vtable
+ }
+diff --git a/third_party/rust/bindgen/src/ir/analysis/mod.rs b/third_party/rust/bindgen/src/ir/analysis/mod.rs
+--- a/third_party/rust/bindgen/src/ir/analysis/mod.rs
++++ b/third_party/rust/bindgen/src/ir/analysis/mod.rs
+@@ -41,7 +41,7 @@
+ mod template_params;
+ pub use self::template_params::UsedTemplateParameters;
+ mod derive;
+-pub use self::derive::{CannotDerive, DeriveTrait, as_cannot_derive_set};
++pub use self::derive::{as_cannot_derive_set, CannotDerive, DeriveTrait};
+ mod has_vtable;
+ pub use self::has_vtable::{HasVtable, HasVtableAnalysis, HasVtableResult};
+ mod has_destructor;
+@@ -56,9 +56,9 @@
+ use ir::context::{BindgenContext, ItemId};
+
+ use ir::traversal::{EdgeKind, Trace};
+-use HashMap;
+ use std::fmt;
+ use std::ops;
++use HashMap;
+
+ /// An analysis in the monotone framework.
+ ///
+@@ -164,10 +164,9 @@
+
+ while let Some(node) = worklist.pop() {
+ if let ConstrainResult::Changed = analysis.constrain(node) {
+- analysis.each_depending_on(
+- node,
+- |needs_work| { worklist.push(needs_work); },
+- );
++ analysis.each_depending_on(node, |needs_work| {
++ worklist.push(needs_work);
++ });
+ }
+ }
+
+@@ -196,9 +195,10 @@
+ if ctx.whitelisted_items().contains(&sub_item) &&
+ consider_edge(edge_kind)
+ {
+- dependencies.entry(sub_item).or_insert(vec![]).push(
+- item,
+- );
++ dependencies
++ .entry(sub_item)
++ .or_insert(vec![])
++ .push(item);
+ }
+ },
+ &(),
+@@ -325,13 +325,17 @@
+ // Yes, what follows is a **terribly** inefficient set union
+ // implementation. Don't copy this code outside of this test!
+
+- let original_size =
+- self.reachable.entry(node).or_insert(HashSet::default()).len();
++ let original_size = self
++ .reachable
++ .entry(node)
++ .or_insert(HashSet::default())
++ .len();
+
+ for sub_node in self.graph.0[&node].iter() {
+ self.reachable.get_mut(&node).unwrap().insert(*sub_node);
+
+- let sub_reachable = self.reachable
++ let sub_reachable = self
++ .reachable
+ .entry(*sub_node)
+ .or_insert(HashSet::default())
+ .clone();
+diff --git a/third_party/rust/bindgen/src/ir/analysis/sizedness.rs b/third_party/rust/bindgen/src/ir/analysis/sizedness.rs
+--- a/third_party/rust/bindgen/src/ir/analysis/sizedness.rs
++++ b/third_party/rust/bindgen/src/ir/analysis/sizedness.rs
+@@ -1,12 +1,14 @@
+ //! Determining the sizedness of types (as base classes and otherwise).
+
+-use super::{ConstrainResult, MonotoneFramework, HasVtable, generate_dependencies};
++use super::{
++ generate_dependencies, ConstrainResult, HasVtable, MonotoneFramework,
++};
+ use ir::context::{BindgenContext, TypeId};
+ use ir::item::IsOpaque;
+ use ir::traversal::EdgeKind;
+ use ir::ty::TypeKind;
+ use std::{cmp, ops};
+-use {HashMap, Entry};
++use {Entry, HashMap};
+
+ /// The result of the `Sizedness` analysis for an individual item.
+ ///
+@@ -22,13 +24,14 @@
+ ///
+ /// We initially assume that all types are `ZeroSized` and then update our
+ /// understanding as we learn more about each type.
+-#[derive(Copy, Clone, Debug, PartialEq, Eq, Ord)]
++#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
+ pub enum SizednessResult {
+- /// Has some size that is known to be greater than zero. That doesn't mean
+- /// it has a static size, but it is not zero sized for sure. In other words,
+- /// it might contain an incomplete array or some other dynamically sized
+- /// type.
+- NonZeroSized,
++ /// The type is zero-sized.
++ ///
++ /// This means that if it is a C++ type, and is not being used as a base
++ /// member, then we must add an `_address` byte to enforce the
++ /// unique-address-per-distinct-object-instance rule.
++ ZeroSized,
+
+ /// Whether this type is zero-sized or not depends on whether a type
+ /// parameter is zero-sized or not.
+@@ -52,12 +55,11 @@
+ /// https://github.com/rust-lang/rust-bindgen/issues/586
+ DependsOnTypeParam,
+
+- /// The type is zero-sized.
+- ///
+- /// This means that if it is a C++ type, and is not being used as a base
+- /// member, then we must add an `_address` byte to enforce the
+- /// unique-address-per-distinct-object-instance rule.
+- ZeroSized,
++ /// Has some size that is known to be greater than zero. That doesn't mean
++ /// it has a static size, but it is not zero sized for sure. In other words,
++ /// it might contain an incomplete array or some other dynamically sized
++ /// type.
++ NonZeroSized,
+ }
+
+ impl Default for SizednessResult {
+@@ -66,21 +68,6 @@
+ }
+ }
+
+-impl cmp::PartialOrd for SizednessResult {
+- fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> {
+- use self::SizednessResult::*;
+-
+- match (*self, *rhs) {
+- (x, y) if x == y => Some(cmp::Ordering::Equal),
+- (NonZeroSized, _) => Some(cmp::Ordering::Greater),
+- (_, NonZeroSized) => Some(cmp::Ordering::Less),
+- (DependsOnTypeParam, _) => Some(cmp::Ordering::Greater),
+- (_, DependsOnTypeParam) => Some(cmp::Ordering::Less),
+- _ => unreachable!(),
+- }
+- }
+-}
+-
+ impl SizednessResult {
+ /// Take the least upper bound of `self` and `rhs`.
+ pub fn join(self, rhs: Self) -> Self {
+@@ -140,7 +127,11 @@
+
+ /// Insert an incremental result, and return whether this updated our
+ /// knowledge of types and we should continue the analysis.
+- fn insert(&mut self, id: TypeId, result: SizednessResult) -> ConstrainResult {
++ fn insert(
++ &mut self,
++ id: TypeId,
++ result: SizednessResult,
++ ) -> ConstrainResult {
+ trace!("inserting {:?} for {:?}", result, id);
+
+ if let SizednessResult::ZeroSized = result {
+@@ -180,15 +171,15 @@
+ let dependencies = generate_dependencies(ctx, Self::consider_edge)
+ .into_iter()
+ .filter_map(|(id, sub_ids)| {
+- id.as_type_id(ctx)
+- .map(|id| {
+- (
+- id,
+- sub_ids.into_iter()
+- .filter_map(|s| s.as_type_id(ctx))
+- .collect::<Vec<_>>()
+- )
+- })
++ id.as_type_id(ctx).map(|id| {
++ (
++ id,
++ sub_ids
++ .into_iter()
++ .filter_map(|s| s.as_type_id(ctx))
++ .collect::<Vec<_>>(),
++ )
++ })
+ })
+ .collect();
+
+@@ -213,7 +204,9 @@
+ fn constrain(&mut self, id: TypeId) -> ConstrainResult {
+ trace!("constrain {:?}", id);
+
+- if let Some(SizednessResult::NonZeroSized) = self.sized.get(&id).cloned() {
++ if let Some(SizednessResult::NonZeroSized) =
++ self.sized.get(&id).cloned()
++ {
+ trace!(" already know it is not zero-sized");
+ return ConstrainResult::Same;
+ }
+@@ -227,8 +220,8 @@
+
+ if id.is_opaque(self.ctx, &()) {
+ trace!(" type is opaque; checking layout...");
+- let result = ty.layout(self.ctx)
+- .map_or(SizednessResult::ZeroSized, |l| {
++ let result =
++ ty.layout(self.ctx).map_or(SizednessResult::ZeroSized, |l| {
+ if l.size == 0 {
+ trace!(" ...layout has size == 0");
+ SizednessResult::ZeroSized
+@@ -247,8 +240,10 @@
+ }
+
+ TypeKind::TypeParam => {
+- trace!(" type params sizedness depends on what they're \
+- instantiated as");
++ trace!(
++ " type params sizedness depends on what they're \
++ instantiated as"
++ );
+ self.insert(id, SizednessResult::DependsOnTypeParam)
+ }
+
+@@ -280,8 +275,10 @@
+ }
+
+ TypeKind::TemplateInstantiation(ref inst) => {
+- trace!(" template instantiations are zero-sized if their \
+- definition is zero-sized");
++ trace!(
++ " template instantiations are zero-sized if their \
++ definition is zero-sized"
++ );
+ self.forward(inst.template_definition(), id)
+ }
+
+@@ -305,7 +302,8 @@
+ return self.insert(id, SizednessResult::NonZeroSized);
+ }
+
+- let result = info.base_members()
++ let result = info
++ .base_members()
+ .iter()
+ .filter_map(|base| self.sized.get(&base.ty))
+ .fold(SizednessResult::ZeroSized, |a, b| a.join(*b));
+@@ -339,9 +337,10 @@
+ impl<'ctx> From<SizednessAnalysis<'ctx>> for HashMap<TypeId, SizednessResult> {
+ fn from(analysis: SizednessAnalysis<'ctx>) -> Self {
+ // We let the lack of an entry mean "ZeroSized" to save space.
+- extra_assert!(analysis.sized.values().all(|v| {
+- *v != SizednessResult::ZeroSized
+- }));
++ extra_assert!(analysis
++ .sized
++ .values()
++ .all(|v| { *v != SizednessResult::ZeroSized }));
+
+ analysis.sized
+ }
+diff --git a/third_party/rust/bindgen/src/ir/analysis/template_params.rs b/third_party/rust/bindgen/src/ir/analysis/template_params.rs
+--- a/third_party/rust/bindgen/src/ir/analysis/template_params.rs
++++ b/third_party/rust/bindgen/src/ir/analysis/template_params.rs
+@@ -203,18 +203,21 @@
+ }
+ }
+
+- fn take_this_id_usage_set<Id: Into<ItemId>>(&mut self, this_id: Id) -> ItemSet {
++ fn take_this_id_usage_set<Id: Into<ItemId>>(
++ &mut self,
++ this_id: Id,
++ ) -> ItemSet {
+ let this_id = this_id.into();
+ self.used
+ .get_mut(&this_id)
+ .expect(
+ "Should have a set of used template params for every item \
+- id",
++ id",
+ )
+ .take()
+ .expect(
+ "Should maintain the invariant that all used template param \
+- sets are `Some` upon entry of `constrain`",
++ sets are `Some` upon entry of `constrain`",
+ )
+ }
+
+@@ -231,7 +234,7 @@
+ ) {
+ trace!(
+ " instantiation of blacklisted template, uses all template \
+- arguments"
++ arguments"
+ );
+
+ let args = instantiation
+@@ -252,8 +255,8 @@
+ .as_ref()
+ .expect(
+ "Because a != this_id, and all used template \
+- param sets other than this_id's are `Some`, \
+- a's used template param set should be `Some`",
++ param sets other than this_id's are `Some`, \
++ a's used template param set should be `Some`",
+ )
+ .iter()
+ .cloned()
+@@ -289,7 +292,7 @@
+ for (arg, param) in args.iter().zip(params.iter()) {
+ trace!(
+ " instantiation's argument {:?} is used if definition's \
+- parameter {:?} is used",
++ parameter {:?} is used",
+ arg,
+ param
+ );
+@@ -297,7 +300,8 @@
+ if used_by_def.contains(&param.into()) {
+ trace!(" param is used by template definition");
+
+- let arg = arg.into_resolver()
++ let arg = arg
++ .into_resolver()
+ .through_type_refs()
+ .through_type_aliases()
+ .resolve(self.ctx)
+@@ -307,15 +311,16 @@
+ continue;
+ }
+
+- let used_by_arg = self.used
++ let used_by_arg = self
++ .used
+ .get(&arg)
+ .expect("Should have a used entry for the template arg")
+ .as_ref()
+ .expect(
+ "Because arg != this_id, and all used template \
+- param sets other than this_id's are `Some`, \
+- arg's used template param set should be \
+- `Some`",
++ param sets other than this_id's are `Some`, \
++ arg's used template param set should be \
++ `Some`",
+ )
+ .iter()
+ .cloned();
+@@ -339,7 +344,8 @@
+ return;
+ }
+
+- let used_by_sub_id = self.used
++ let used_by_sub_id = self
++ .used
+ .get(&sub_id)
+ .expect("Should have a used set for the sub_id successor")
+ .as_ref()
+@@ -370,9 +376,7 @@
+ type Extra = &'ctx BindgenContext;
+ type Output = HashMap<ItemId, ItemSet>;
+
+- fn new(
+- ctx: &'ctx BindgenContext,
+- ) -> UsedTemplateParameters<'ctx> {
++ fn new(ctx: &'ctx BindgenContext) -> UsedTemplateParameters<'ctx> {
+ let mut used = HashMap::default();
+ let mut dependencies = HashMap::default();
+ let whitelisted_items: HashSet<_> =
+@@ -383,7 +387,13 @@
+ .cloned()
+ .flat_map(|i| {
+ let mut reachable = vec![i];
+- i.trace(ctx, &mut |s, _| { reachable.push(s); }, &());
++ i.trace(
++ ctx,
++ &mut |s, _| {
++ reachable.push(s);
++ },
++ &(),
++ );
+ reachable
+ })
+ .collect();
+@@ -399,9 +409,10 @@
+ ctx,
+ &mut |sub_item: ItemId, _| {
+ used.entry(sub_item).or_insert(Some(ItemSet::new()));
+- dependencies.entry(sub_item).or_insert(vec![]).push(
+- item,
+- );
++ dependencies
++ .entry(sub_item)
++ .or_insert(vec![])
++ .push(item);
+ },
+ &(),
+ );
+@@ -418,11 +429,11 @@
+ // Although template definitions should always have
+ // template parameters, there is a single exception:
+ // opaque templates. Hence the unwrap_or.
+- let params =
+- decl.self_template_params(ctx);
++ let params = decl.self_template_params(ctx);
+
+ for (arg, param) in args.iter().zip(params.iter()) {
+- let arg = arg.into_resolver()
++ let arg = arg
++ .into_resolver()
+ .through_type_aliases()
+ .through_type_refs()
+ .resolve(ctx)
+@@ -487,7 +498,13 @@
+ .cloned()
+ .flat_map(|i| {
+ let mut reachable = vec![i];
+- i.trace(self.ctx, &mut |s, _| { reachable.push(s); }, &());
++ i.trace(
++ self.ctx,
++ &mut |s, _| {
++ reachable.push(s);
++ },
++ &(),
++ );
+ reachable
+ })
+ .collect()
+@@ -520,9 +537,9 @@
+ // Template instantiations only use their template arguments if the
+ // template definition uses the corresponding template parameter.
+ Some(&TypeKind::TemplateInstantiation(ref inst)) => {
+- if self.whitelisted_items.contains(
+- &inst.template_definition().into(),
+- )
++ if self
++ .whitelisted_items
++ .contains(&inst.template_definition().into())
+ {
+ self.constrain_instantiation(
+ id,
+@@ -548,7 +565,7 @@
+ assert!(
+ new_len >= original_len,
+ "This is the property that ensures this function is monotone -- \
+- if it doesn't hold, the analysis might never terminate!"
++ if it doesn't hold, the analysis might never terminate!"
+ );
+
+ // Put the set back in the hash map and restore our invariant.
+@@ -576,8 +593,7 @@
+ }
+ }
+
+-impl<'ctx> From<UsedTemplateParameters<'ctx>>
+- for HashMap<ItemId, ItemSet> {
++impl<'ctx> From<UsedTemplateParameters<'ctx>> for HashMap<ItemId, ItemSet> {
+ fn from(used_templ_params: UsedTemplateParameters<'ctx>) -> Self {
+ used_templ_params
+ .used
+diff --git a/third_party/rust/bindgen/src/ir/annotations.rs b/third_party/rust/bindgen/src/ir/annotations.rs
+--- a/third_party/rust/bindgen/src/ir/annotations.rs
++++ b/third_party/rust/bindgen/src/ir/annotations.rs
+@@ -94,7 +94,11 @@
+ let mut matched_one = false;
+ anno.parse(&cursor.comment(), &mut matched_one);
+
+- if matched_one { Some(anno) } else { None }
++ if matched_one {
++ Some(anno)
++ } else {
++ None
++ }
+ }
+
+ /// Should this type be hidden?
+@@ -157,9 +161,10 @@
+ use clang_sys::CXComment_HTMLStartTag;
+ if comment.kind() == CXComment_HTMLStartTag &&
+ comment.get_tag_name() == "div" &&
+- comment.get_tag_attrs().next().map_or(false, |attr| {
+- attr.name == "rustbindgen"
+- })
++ comment
++ .get_tag_attrs()
++ .next()
++ .map_or(false, |attr| attr.name == "rustbindgen")
+ {
+ *matched = true;
+ for attr in comment.get_tag_attrs() {
+@@ -168,14 +173,11 @@
+ "hide" => self.hide = true,
+ "nocopy" => self.disallow_copy = true,
+ "replaces" => {
+- self.use_instead_of =
+- Some(
+- attr.value.split("::").map(Into::into).collect(),
+- )
++ self.use_instead_of = Some(
++ attr.value.split("::").map(Into::into).collect(),
++ )
+ }
+- "derive" => {
+- self.derives.push(attr.value)
+- }
++ "derive" => self.derives.push(attr.value),
+ "private" => {
+ self.private_fields = Some(attr.value != "false")
+ }
+diff --git a/third_party/rust/bindgen/src/ir/comment.rs b/third_party/rust/bindgen/src/ir/comment.rs
+--- a/third_party/rust/bindgen/src/ir/comment.rs
++++ b/third_party/rust/bindgen/src/ir/comment.rs
+@@ -67,7 +67,8 @@
+ let indent = make_indent(indent);
+ // Strip any potential `*` characters preceding each line.
+ let mut is_first = true;
+- let mut lines: Vec<_> = comment.lines()
++ let mut lines: Vec<_> = comment
++ .lines()
+ .map(|line| line.trim().trim_start_matches('*').trim_start_matches('!'))
+ .skip_while(|line| line.trim().is_empty()) // Skip the first empty lines.
+ .map(|line| {
+@@ -78,7 +79,10 @@
+ .collect();
+
+ // Remove the trailing line corresponding to the `*/`.
+- if lines.last().map_or(false, |l| l.trim().is_empty() || l.trim() == "///") {
++ if lines
++ .last()
++ .map_or(false, |l| l.trim().is_empty() || l.trim() == "///")
++ {
+ lines.pop();
+ }
+
+diff --git a/third_party/rust/bindgen/src/ir/comp.rs b/third_party/rust/bindgen/src/ir/comp.rs
+--- a/third_party/rust/bindgen/src/ir/comp.rs
++++ b/third_party/rust/bindgen/src/ir/comp.rs
+@@ -52,13 +52,13 @@
+ },
+ }
+
+-
+ impl MethodKind {
+ /// Is this a destructor method?
+ pub fn is_destructor(&self) -> bool {
+ match *self {
+- MethodKind::Destructor |
+- MethodKind::VirtualDestructor { .. } => true,
++ MethodKind::Destructor | MethodKind::VirtualDestructor { .. } => {
++ true
++ }
+ _ => false,
+ }
+ }
+@@ -87,7 +87,11 @@
+
+ impl Method {
+ /// Construct a new `Method`.
+- pub fn new(kind: MethodKind, signature: FunctionId, is_const: bool) -> Self {
++ pub fn new(
++ kind: MethodKind,
++ signature: FunctionId,
++ is_const: bool,
++ ) -> Self {
+ Method {
+ kind,
+ signature,
+@@ -198,9 +202,7 @@
+ /// Get this field's layout.
+ pub fn layout(&self, ctx: &BindgenContext) -> Option<Layout> {
+ match *self {
+- Field::Bitfields(BitfieldUnit {
+- layout, ..
+- }) => Some(layout),
++ Field::Bitfields(BitfieldUnit { layout, .. }) => Some(layout),
+ Field::DataMember(ref data) => {
+ ctx.resolve_type(data.ty).layout(ctx)
+ }
+@@ -219,9 +221,7 @@
+ Field::DataMember(ref data) => {
+ tracer.visit_kind(data.ty.into(), EdgeKind::Field);
+ }
+- Field::Bitfields(BitfieldUnit {
+- ref bitfields, ..
+- }) => {
++ Field::Bitfields(BitfieldUnit { ref bitfields, .. }) => {
+ for bf in bitfields {
+ tracer.visit_kind(bf.ty().into(), EdgeKind::Field);
+ }
+@@ -242,10 +242,10 @@
+ match *self {
+ Field::DataMember(ref data) => data.dot_attributes(ctx, out),
+ Field::Bitfields(BitfieldUnit {
+- layout,
+- ref bitfields,
+- ..
+- }) => {
++ layout,
++ ref bitfields,
++ ..
++ }) => {
+ writeln!(
+ out,
+ r#"<tr>
+@@ -259,8 +259,7 @@
+ <td>unit.align</td><td>{}</td>
+ </tr>
+ "#,
+- layout.size,
+- layout.align
++ layout.size, layout.align
+ )?;
+ for bf in bitfields {
+ bf.dot_attributes(ctx, out)?;
+@@ -373,7 +372,10 @@
+ /// Panics if called before assigning bitfield accessor names or if
+ /// this bitfield have no name.
+ pub fn getter_name(&self) -> &str {
+- assert!(self.name().is_some(), "`Bitfield::getter_name` called on anonymous field");
++ assert!(
++ self.name().is_some(),
++ "`Bitfield::getter_name` called on anonymous field"
++ );
+ self.getter_name.as_ref().expect(
+ "`Bitfield::getter_name` should only be called after\
+ assigning bitfield accessor names",
+@@ -385,7 +387,10 @@
+ /// Panics if called before assigning bitfield accessor names or if
+ /// this bitfield have no name.
+ pub fn setter_name(&self) -> &str {
+- assert!(self.name().is_some(), "`Bitfield::setter_name` called on anonymous field");
++ assert!(
++ self.name().is_some(),
++ "`Bitfield::setter_name` called on anonymous field"
++ );
+ self.setter_name.as_ref().expect(
+ "`Bitfield::setter_name` should only be called\
+ after assigning bitfield accessor names",
+@@ -423,7 +428,6 @@
+ }
+ }
+
+-
+ /// A raw field might be either of a plain data member or a bitfield within a
+ /// bitfield allocation unit, but we haven't processed it and determined which
+ /// yet (which would involve allocating it into a bitfield unit if it is a
+@@ -597,9 +601,8 @@
+
+ for bitfield in raw_bitfields {
+ let bitfield_width = bitfield.bitfield_width().unwrap() as usize;
+- let bitfield_layout = ctx.resolve_type(bitfield.ty())
+- .layout(ctx)
+- .ok_or(())?;
++ let bitfield_layout =
++ ctx.resolve_type(bitfield.ty()).layout(ctx).ok_or(())?;
+ let bitfield_size = bitfield_layout.size;
+ let bitfield_align = bitfield_layout.align;
+
+@@ -628,8 +631,8 @@
+ } else {
+ if offset != 0 &&
+ (bitfield_width == 0 ||
+- (offset & (bitfield_align * 8 - 1)) + bitfield_width >
+- bitfield_size * 8)
++ (offset & (bitfield_align * 8 - 1)) + bitfield_width >
++ bitfield_size * 8)
+ {
+ offset = align_to(offset, bitfield_align * 8);
+ }
+@@ -724,20 +727,17 @@
+ }
+ };
+
+- let result =
+- raw_fields_to_fields_and_bitfield_units(ctx, raws);
++ let result = raw_fields_to_fields_and_bitfield_units(ctx, raws);
+
+ match result {
+ Ok(fields_and_units) => {
+ mem::replace(
+ self,
+- CompFields::AfterComputingBitfieldUnits(fields_and_units));
++ CompFields::AfterComputingBitfieldUnits(fields_and_units),
++ );
+ }
+ Err(()) => {
+- mem::replace(
+- self,
+- CompFields::ErrorComputingBitfieldUnits
+- );
++ mem::replace(self, CompFields::ErrorComputingBitfieldUnits);
+ }
+ }
+ }
+@@ -754,7 +754,11 @@
+ }
+ };
+
+- fn has_method(methods: &[Method], ctx: &BindgenContext, name: &str) -> bool {
++ fn has_method(
++ methods: &[Method],
++ ctx: &BindgenContext,
++ name: &str,
++ ) -> bool {
+ methods.iter().any(|method| {
+ let method_name = ctx.resolve_func(method.signature()).name();
+ method_name == name || ctx.rust_mangle(&method_name) == name
+@@ -776,7 +780,8 @@
+ .map(|bitfield_name| {
+ let bitfield_name = bitfield_name.to_string();
+ let getter = {
+- let mut getter = ctx.rust_mangle(&bitfield_name).to_string();
++ let mut getter =
++ ctx.rust_mangle(&bitfield_name).to_string();
+ if has_method(methods, ctx, &getter) {
+ getter.push_str("_bindgen_bitfield");
+ }
+@@ -803,21 +808,24 @@
+ }
+
+ anon_field_counter += 1;
+- let generated_name = format!("__bindgen_anon_{}", anon_field_counter);
++ let generated_name =
++ format!("__bindgen_anon_{}", anon_field_counter);
+ *name = Some(generated_name);
+ }
+- Field::Bitfields(ref mut bu) => for bitfield in &mut bu.bitfields {
+- if bitfield.name().is_none() {
+- continue;
+- }
++ Field::Bitfields(ref mut bu) => {
++ for bitfield in &mut bu.bitfields {
++ if bitfield.name().is_none() {
++ continue;
++ }
+
+- if let Some(AccessorNamesPair { getter, setter }) =
+- accessor_names.remove(bitfield.name().unwrap())
+- {
+- bitfield.getter_name = Some(getter);
+- bitfield.setter_name = Some(setter);
++ if let Some(AccessorNamesPair { getter, setter }) =
++ accessor_names.remove(bitfield.name().unwrap())
++ {
++ bitfield.getter_name = Some(getter);
++ bitfield.setter_name = Some(setter);
++ }
+ }
+- },
++ }
+ }
+ }
+ }
+@@ -1185,8 +1193,7 @@
+ let mut ci = CompInfo::new(kind);
+ ci.is_forward_declaration =
+ location.map_or(true, |cur| match cur.kind() {
+- CXCursor_StructDecl |
+- CXCursor_UnionDecl |
++ CXCursor_StructDecl | CXCursor_UnionDecl |
+ CXCursor_ClassDecl => !cur.is_definition(),
+ _ => false,
+ });
+@@ -1195,16 +1202,20 @@
+ cursor.visit(|cur| {
+ if cur.kind() != CXCursor_FieldDecl {
+ if let Some((ty, clang_ty, offset)) =
+- maybe_anonymous_struct_field.take() {
++ maybe_anonymous_struct_field.take()
++ {
+ if cur.kind() == CXCursor_TypedefDecl &&
+- cur.typedef_type().unwrap().canonical_type() == clang_ty {
++ cur.typedef_type().unwrap().canonical_type() ==
++ clang_ty
++ {
+ // Typedefs of anonymous structs appear later in the ast
+ // than the struct itself, that would otherwise be an
+ // anonymous field. Detect that case here, and do
+ // nothing.
+ } else {
+- let field =
+- RawField::new(None, ty, None, None, None, false, offset);
++ let field = RawField::new(
++ None, ty, None, None, None, false, offset,
++ );
+ ci.fields.append_raw_field(field);
+ }
+ }
+@@ -1213,7 +1224,8 @@
+ match cur.kind() {
+ CXCursor_FieldDecl => {
+ if let Some((ty, clang_ty, offset)) =
+- maybe_anonymous_struct_field.take() {
++ maybe_anonymous_struct_field.take()
++ {
+ let mut used = false;
+ cur.visit(|child| {
+ if child.cur_type() == clang_ty {
+@@ -1222,22 +1234,20 @@
+ CXChildVisit_Continue
+ });
+ if !used {
+- let field = RawField::new(None,
+- ty,
+- None,
+- None,
+- None,
+- false,
+- offset);
++ let field = RawField::new(
++ None, ty, None, None, None, false, offset,
++ );
+ ci.fields.append_raw_field(field);
+ }
+ }
+
+ let bit_width = cur.bit_width();
+- let field_type = Item::from_ty_or_ref(cur.cur_type(),
+- cur,
+- Some(potential_id),
+- ctx);
++ let field_type = Item::from_ty_or_ref(
++ cur.cur_type(),
++ cur,
++ Some(potential_id),
++ ctx,
++ );
+
+ let comment = cur.raw_comment();
+ let annotations = Annotations::new(&cur);
+@@ -1247,18 +1257,22 @@
+
+ // Name can be empty if there are bitfields, for example,
+ // see tests/headers/struct_with_bitfields.h
+- assert!(!name.is_empty() || bit_width.is_some(),
+- "Empty field name?");
++ assert!(
++ !name.is_empty() || bit_width.is_some(),
++ "Empty field name?"
++ );
+
+ let name = if name.is_empty() { None } else { Some(name) };
+
+- let field = RawField::new(name,
+- field_type,
+- comment,
+- annotations,
+- bit_width,
+- is_mutable,
+- offset);
++ let field = RawField::new(
++ name,
++ field_type,
++ comment,
++ annotations,
++ bit_width,
++ is_mutable,
++ offset,
++ );
+ ci.fields.append_raw_field(field);
+
+ // No we look for things like attributes and stuff.
+@@ -1268,7 +1282,6 @@
+ }
+ CXChildVisit_Continue
+ });
+-
+ }
+ CXCursor_UnexposedAttr => {
+ ci.found_unknown_attr = true;
+@@ -1293,8 +1306,8 @@
+ // definition, it's a valid inner type.
+ //
+ // [1]: https://github.com/rust-lang/rust-bindgen/issues/482
+- let is_inner_struct = cur.semantic_parent() == cursor ||
+- cur.is_definition();
++ let is_inner_struct =
++ cur.semantic_parent() == cursor || cur.is_definition();
+ if !is_inner_struct {
+ return CXChildVisit_Continue;
+ }
+@@ -1311,19 +1324,22 @@
+ // A declaration of an union or a struct without name could
+ // also be an unnamed field, unfortunately.
+ if cur.spelling().is_empty() &&
+- cur.kind() != CXCursor_EnumDecl {
++ cur.kind() != CXCursor_EnumDecl
++ {
+ let ty = cur.cur_type();
+ let offset = cur.offset_of_field().ok();
+- maybe_anonymous_struct_field = Some((inner, ty, offset));
++ maybe_anonymous_struct_field =
++ Some((inner, ty, offset));
+ }
+ }
+ CXCursor_PackedAttr => {
+ ci.packed_attr = true;
+ }
+ CXCursor_TemplateTypeParameter => {
+- let param = Item::type_param(None, cur, ctx)
+- .expect("Item::type_param should't fail when pointing \
+- at a TemplateTypeParameter");
++ let param = Item::type_param(None, cur, ctx).expect(
++ "Item::type_param should't fail when pointing \
++ at a TemplateTypeParameter",
++ );
+ ci.template_params.push(param);
+ }
+ CXCursor_CXXBaseSpecifier => {
+@@ -1348,8 +1364,7 @@
+ field_name: field_name,
+ });
+ }
+- CXCursor_Constructor |
+- CXCursor_Destructor |
++ CXCursor_Constructor | CXCursor_Destructor |
+ CXCursor_CXXMethod => {
+ let is_virtual = cur.method_is_virtual();
+ let is_static = cur.method_is_static();
+@@ -1376,9 +1391,14 @@
+ // `FunctionSig`.
+ let signature =
+ match Item::parse(cur, Some(potential_id), ctx) {
+- Ok(item) if ctx.resolve_item(item)
+- .kind()
+- .is_function() => item,
++ Ok(item)
++ if ctx
++ .resolve_item(item)
++ .kind()
++ .is_function() =>
++ {
++ item
++ }
+ _ => return CXChildVisit_Continue,
+ };
+
+@@ -1424,7 +1444,8 @@
+ CXCursor_VarDecl => {
+ let linkage = cur.linkage();
+ if linkage != CXLinkage_External &&
+- linkage != CXLinkage_UniqueExternal {
++ linkage != CXLinkage_UniqueExternal
++ {
+ return CXChildVisit_Continue;
+ }
+
+@@ -1433,9 +1454,8 @@
+ return CXChildVisit_Continue;
+ }
+
+- if let Ok(item) = Item::parse(cur,
+- Some(potential_id),
+- ctx) {
++ if let Ok(item) = Item::parse(cur, Some(potential_id), ctx)
++ {
+ ci.inner_vars.push(item.as_var_id_unchecked());
+ }
+ }
+@@ -1445,11 +1465,13 @@
+ CXCursor_FunctionTemplate |
+ CXCursor_ConversionFunction => {}
+ _ => {
+- warn!("unhandled comp member `{}` (kind {:?}) in `{}` ({})",
+- cur.spelling(),
+- clang::kind_to_str(cur.kind()),
+- cursor.spelling(),
+- cur.location());
++ warn!(
++ "unhandled comp member `{}` (kind {:?}) in `{}` ({})",
++ cur.spelling(),
++ clang::kind_to_str(cur.kind()),
++ cursor.spelling(),
++ cur.location()
++ );
+ }
+ }
+ CXChildVisit_Continue
+@@ -1470,16 +1492,13 @@
+ use clang_sys::*;
+ Ok(match cursor.kind() {
+ CXCursor_UnionDecl => CompKind::Union,
+- CXCursor_ClassDecl |
+- CXCursor_StructDecl => CompKind::Struct,
++ CXCursor_ClassDecl | CXCursor_StructDecl => CompKind::Struct,
+ CXCursor_CXXBaseSpecifier |
+ CXCursor_ClassTemplatePartialSpecialization |
+- CXCursor_ClassTemplate => {
+- match cursor.template_kind() {
+- CXCursor_UnionDecl => CompKind::Union,
+- _ => CompKind::Struct,
+- }
+- }
++ CXCursor_ClassTemplate => match cursor.template_kind() {
++ CXCursor_UnionDecl => CompKind::Union,
++ _ => CompKind::Struct,
++ },
+ _ => {
+ warn!("Unknown kind for comp type: {:?}", cursor);
+ return Err(ParseError::Continue);
+@@ -1505,9 +1524,13 @@
+ }
+
+ /// Is this compound type packed?
+- pub fn is_packed(&self, ctx: &BindgenContext, layout: &Option<Layout>) -> bool {
++ pub fn is_packed(
++ &self,
++ ctx: &BindgenContext,
++ layout: &Option<Layout>,
++ ) -> bool {
+ if self.packed_attr {
+- return true
++ return true;
+ }
+
+ // Even though `libclang` doesn't expose `#pragma packed(...)`, we can
+@@ -1632,7 +1655,7 @@
+
+ fn is_opaque(&self, ctx: &BindgenContext, layout: &Option<Layout>) -> bool {
+ if self.has_non_type_template_params {
+- return true
++ return true;
+ }
+
+ // When we do not have the layout for a bitfield's type (for example, it
+@@ -1647,17 +1670,14 @@
+ // some strange things going on, and the best we can do is make the
+ // whole struct opaque.
+ if self.fields().iter().any(|f| match *f {
+- Field::DataMember(_) => {
+- false
+- },
+- Field::Bitfields(ref unit) => {
+- unit.bitfields().iter().any(|bf| {
+- let bitfield_layout = ctx.resolve_type(bf.ty())
+- .layout(ctx)
+- .expect("Bitfield without layout? Gah!");
+- bf.width() / 8 > bitfield_layout.size as u32
+- })
+- }
++ Field::DataMember(_) => false,
++ Field::Bitfields(ref unit) => unit.bitfields().iter().any(|bf| {
++ let bitfield_layout = ctx
++ .resolve_type(bf.ty())
++ .layout(ctx)
++ .expect("Bitfield without layout? Gah!");
++ bf.width() / 8 > bitfield_layout.size as u32
++ }),
+ }) {
+ return true;
+ }
+@@ -1668,7 +1688,9 @@
+ //
+ // See https://github.com/rust-lang/rust-bindgen/issues/537 and
+ // https://github.com/rust-lang/rust/issues/33158
+- if self.is_packed(ctx, layout) && layout.map_or(false, |l| l.align > 1) {
++ if self.is_packed(ctx, layout) &&
++ layout.map_or(false, |l| l.align > 1)
++ {
+ warn!("Found a type that is both packed and aligned to greater than \
+ 1; Rust before version 1.33 doesn't have `#[repr(packed(N))]`, so we \
+ are treating it as opaque. You may wish to set bindgen's rust target \
+@@ -1682,10 +1704,7 @@
+ }
+
+ impl TemplateParameters for CompInfo {
+- fn self_template_params(
+- &self,
+- _ctx: &BindgenContext,
+- ) -> Vec<TypeId> {
++ fn self_template_params(&self, _ctx: &BindgenContext) -> Vec<TypeId> {
+ self.template_params.clone()
+ }
+ }
+diff --git a/third_party/rust/bindgen/src/ir/context.rs b/third_party/rust/bindgen/src/ir/context.rs
+--- a/third_party/rust/bindgen/src/ir/context.rs
++++ b/third_party/rust/bindgen/src/ir/context.rs
+@@ -1,13 +1,17 @@
+ //! Common context that is passed around during parsing and codegen.
+
+-use super::analysis::{CannotDerive, DeriveTrait, as_cannot_derive_set,
+- HasTypeParameterInArray, HasVtableAnalysis,
+- HasVtableResult, HasDestructorAnalysis,
+- UsedTemplateParameters, HasFloat, SizednessAnalysis,
+- SizednessResult, analyze};
+-use super::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault,
+- CanDeriveHash, CanDerivePartialOrd, CanDeriveOrd,
+- CanDerivePartialEq, CanDeriveEq, CanDerive};
++use super::super::time::Timer;
++use super::analysis::{
++ analyze, as_cannot_derive_set, CannotDerive, DeriveTrait,
++ HasDestructorAnalysis, HasFloat, HasTypeParameterInArray,
++ HasVtableAnalysis, HasVtableResult, SizednessAnalysis, SizednessResult,
++ UsedTemplateParameters,
++};
++use super::derive::{
++ CanDerive, CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq,
++ CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd,
++};
++use super::function::Function;
+ use super::int::IntKind;
+ use super::item::{IsOpaque, Item, ItemAncestors, ItemSet};
+ use super::item_kind::ItemKind;
+@@ -15,9 +19,6 @@
+ use super::template::{TemplateInstantiation, TemplateParameters};
+ use super::traversal::{self, Edge, ItemTraversal};
+ use super::ty::{FloatKind, Type, TypeKind};
+-use super::function::Function;
+-use super::super::time::Timer;
+-use BindgenOptions;
+ use callbacks::ParseCallbacks;
+ use cexpr;
+ use clang::{self, Cursor};
+@@ -26,10 +27,11 @@
+ use proc_macro2::{Ident, Span};
+ use std::borrow::Cow;
+ use std::cell::Cell;
++use std::collections::HashMap as StdHashMap;
+ use std::iter::IntoIterator;
+ use std::mem;
+-use std::collections::HashMap as StdHashMap;
+-use {HashMap, HashSet, Entry};
++use BindgenOptions;
++use {Entry, HashMap, HashSet};
+
+ /// An identifier for some kind of IR item.
+ #[derive(Debug, Copy, Clone, Eq, PartialOrd, Ord, Hash)]
+@@ -199,7 +201,7 @@
+
+ impl<T> ::std::cmp::PartialEq<T> for ItemId
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn eq(&self, rhs: &T) -> bool {
+ let rhs: ItemId = (*rhs).into();
+@@ -209,7 +211,7 @@
+
+ impl<T> CanDeriveDebug for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn can_derive_debug(&self, ctx: &BindgenContext) -> bool {
+ ctx.options().derive_debug && ctx.lookup_can_derive_debug(*self)
+@@ -218,7 +220,7 @@
+
+ impl<T> CanDeriveDefault for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn can_derive_default(&self, ctx: &BindgenContext) -> bool {
+ ctx.options().derive_default && ctx.lookup_can_derive_default(*self)
+@@ -227,7 +229,7 @@
+
+ impl<T> CanDeriveCopy for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn can_derive_copy(&self, ctx: &BindgenContext) -> bool {
+ ctx.options().derive_copy && ctx.lookup_can_derive_copy(*self)
+@@ -236,7 +238,7 @@
+
+ impl<T> CanDeriveHash for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn can_derive_hash(&self, ctx: &BindgenContext) -> bool {
+ ctx.options().derive_hash && ctx.lookup_can_derive_hash(*self)
+@@ -245,42 +247,46 @@
+
+ impl<T> CanDerivePartialOrd for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn can_derive_partialord(&self, ctx: &BindgenContext) -> bool {
+ ctx.options().derive_partialord &&
+- ctx.lookup_can_derive_partialeq_or_partialord(*self) == CanDerive::Yes
++ ctx.lookup_can_derive_partialeq_or_partialord(*self) ==
++ CanDerive::Yes
+ }
+ }
+
+ impl<T> CanDerivePartialEq for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn can_derive_partialeq(&self, ctx: &BindgenContext) -> bool {
+ ctx.options().derive_partialeq &&
+- ctx.lookup_can_derive_partialeq_or_partialord(*self) == CanDerive::Yes
++ ctx.lookup_can_derive_partialeq_or_partialord(*self) ==
++ CanDerive::Yes
+ }
+ }
+
+ impl<T> CanDeriveEq for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn can_derive_eq(&self, ctx: &BindgenContext) -> bool {
+ ctx.options().derive_eq &&
+- ctx.lookup_can_derive_partialeq_or_partialord(*self) == CanDerive::Yes &&
++ ctx.lookup_can_derive_partialeq_or_partialord(*self) ==
++ CanDerive::Yes &&
+ !ctx.lookup_has_float(*self)
+ }
+ }
+
+ impl<T> CanDeriveOrd for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn can_derive_ord(&self, ctx: &BindgenContext) -> bool {
+ ctx.options().derive_ord &&
+- ctx.lookup_can_derive_partialeq_or_partialord(*self) == CanDerive::Yes &&
++ ctx.lookup_can_derive_partialeq_or_partialord(*self) ==
++ CanDerive::Yes &&
+ !ctx.lookup_has_float(*self)
+ }
+ }
+@@ -476,7 +482,7 @@
+ let id = self.traversal.next()?;
+
+ if self.ctx.resolve_item(id).is_blacklisted(self.ctx) {
+- continue
++ continue;
+ }
+
+ return Some(id);
+@@ -526,7 +532,7 @@
+
+ // If we're running from a build script, try to find the cargo target.
+ if let Ok(t) = env::var("TARGET") {
+- return (t, false)
++ return (t, false);
+ }
+
+ (HOST_TARGET.to_owned(), false)
+@@ -577,8 +583,11 @@
+ if let Some(ref ti) = target_info {
+ if effective_target == HOST_TARGET {
+ assert_eq!(
+- ti.pointer_width / 8, mem::size_of::<*mut ()>(),
+- "{:?} {:?}", effective_target, HOST_TARGET
++ ti.pointer_width / 8,
++ mem::size_of::<*mut ()>(),
++ "{:?} {:?}",
++ effective_target,
++ HOST_TARGET
+ );
+ }
+ }
+@@ -661,7 +670,7 @@
+ }
+
+ /// Get the user-provided callbacks by reference, if any.
+- pub fn parse_callbacks(&self) -> Option<&ParseCallbacks> {
++ pub fn parse_callbacks(&self) -> Option<&dyn ParseCallbacks> {
+ self.options().parse_callbacks.as_ref().map(|t| &**t)
+ }
+
+@@ -677,12 +686,11 @@
+ ) {
+ debug!(
+ "BindgenContext::add_item({:?}, declaration: {:?}, loc: {:?}",
+- item,
+- declaration,
+- location
++ item, declaration, location
+ );
+ debug_assert!(
+- declaration.is_some() || !item.kind().is_type() ||
++ declaration.is_some() ||
++ !item.kind().is_type() ||
+ item.kind().expect_type().is_builtin_or_type_param() ||
+ item.kind().expect_type().is_opaque(self, &item) ||
+ item.kind().expect_type().is_unresolved_ref(),
+@@ -692,8 +700,8 @@
+ let id = item.id();
+ let is_type = item.kind().is_type();
+ let is_unnamed = is_type && item.expect_type().name().is_none();
+- let is_template_instantiation = is_type &&
+- item.expect_type().is_template_instantiation();
++ let is_template_instantiation =
++ is_type && item.expect_type().is_template_instantiation();
+
+ if item.id() != self.root_module {
+ self.add_item_to_module(&item);
+@@ -731,7 +739,10 @@
+ debug!(
+ "Invalid declaration {:?} found for type {:?}",
+ declaration,
+- self.resolve_item_fallible(id).unwrap().kind().expect_type()
++ self.resolve_item_fallible(id)
++ .unwrap()
++ .kind()
++ .expect_type()
+ );
+ return;
+ }
+@@ -743,8 +754,7 @@
+ } else {
+ warn!(
+ "Valid declaration with no USR: {:?}, {:?}",
+- declaration,
+- location
++ declaration, location
+ );
+ TypeKey::Declaration(declaration)
+ };
+@@ -794,8 +804,7 @@
+ pub fn add_type_param(&mut self, item: Item, definition: clang::Cursor) {
+ debug!(
+ "BindgenContext::add_type_param: item = {:?}; definition = {:?}",
+- item,
+- definition
++ item, definition
+ );
+
+ assert!(
+@@ -816,7 +825,9 @@
+ "should not have already associated an item with the given id"
+ );
+
+- let old_named_ty = self.type_params.insert(definition, id.as_type_id_unchecked());
++ let old_named_ty = self
++ .type_params
++ .insert(definition, id.as_type_id_unchecked());
+ assert!(
+ old_named_ty.is_none(),
+ "should not have already associated a named type with this id"
+@@ -841,61 +852,16 @@
+ name.contains("?") ||
+ name.contains("$") ||
+ match name {
+- "abstract" |
+- "alignof" |
+- "as" |
+- "async" |
+- "become" |
+- "box" |
+- "break" |
+- "const" |
+- "continue" |
+- "crate" |
+- "do" |
+- "else" |
+- "enum" |
+- "extern" |
+- "false" |
+- "final" |
+- "fn" |
+- "for" |
+- "if" |
+- "impl" |
+- "in" |
+- "let" |
+- "loop" |
+- "macro" |
+- "match" |
+- "mod" |
+- "move" |
+- "mut" |
+- "offsetof" |
+- "override" |
+- "priv" |
+- "proc" |
+- "pub" |
+- "pure" |
+- "ref" |
+- "return" |
+- "Self" |
+- "self" |
+- "sizeof" |
+- "static" |
+- "struct" |
+- "super" |
+- "trait" |
+- "true" |
+- "type" |
+- "typeof" |
+- "unsafe" |
+- "unsized" |
+- "use" |
+- "virtual" |
+- "where" |
+- "while" |
+- "yield" |
+- "bool" |
+- "_" => true,
++ "abstract" | "alignof" | "as" | "async" | "become" |
++ "box" | "break" | "const" | "continue" | "crate" | "do" |
++ "else" | "enum" | "extern" | "false" | "final" | "fn" |
++ "for" | "if" | "impl" | "in" | "let" | "loop" | "macro" |
++ "match" | "mod" | "move" | "mut" | "offsetof" |
++ "override" | "priv" | "proc" | "pub" | "pure" | "ref" |
++ "return" | "Self" | "self" | "sizeof" | "static" |
++ "struct" | "super" | "trait" | "true" | "type" | "typeof" |
++ "unsafe" | "unsized" | "use" | "virtual" | "where" |
++ "while" | "yield" | "bool" | "_" => true,
+ _ => false,
+ }
+ {
+@@ -912,7 +878,7 @@
+ /// Returns a mangled name as a rust identifier.
+ pub fn rust_ident<S>(&self, name: S) -> Ident
+ where
+- S: AsRef<str>
++ S: AsRef<str>,
+ {
+ self.rust_ident_raw(self.rust_mangle(name.as_ref()))
+ }
+@@ -920,20 +886,17 @@
+ /// Returns a mangled name as a rust identifier.
+ pub fn rust_ident_raw<T>(&self, name: T) -> Ident
+ where
+- T: AsRef<str>
++ T: AsRef<str>,
+ {
+ Ident::new(name.as_ref(), Span::call_site())
+ }
+
+ /// Iterate over all items that have been defined.
+ pub fn items(&self) -> impl Iterator<Item = (ItemId, &Item)> {
+- self.items
+- .iter()
+- .enumerate()
+- .filter_map(|(index, item)| {
+- let item = item.as_ref()?;
+- Some((ItemId(index), item))
+- })
++ self.items.iter().enumerate().filter_map(|(index, item)| {
++ let item = item.as_ref()?;
++ Some((ItemId(index), item))
++ })
+ }
+
+ /// Have we collected all unresolved type references yet?
+@@ -971,19 +934,20 @@
+ let typerefs = self.collect_typerefs();
+
+ for (id, ty, loc, parent_id) in typerefs {
+- let _resolved = {
+- let resolved = Item::from_ty(&ty, loc, parent_id, self)
++ let _resolved =
++ {
++ let resolved = Item::from_ty(&ty, loc, parent_id, self)
+ .unwrap_or_else(|_| {
+ warn!("Could not resolve type reference, falling back \
+ to opaque blob");
+ Item::new_opaque_type(self.next_item_id(), &ty, self)
+ });
+
+- let item = self.items[id.0].as_mut().unwrap();
+- *item.kind_mut().as_type_mut().unwrap().kind_mut() =
+- TypeKind::ResolvedTypeRef(resolved);
+- resolved
+- };
++ let item = self.items[id.0].as_mut().unwrap();
++ *item.kind_mut().as_type_mut().unwrap().kind_mut() =
++ TypeKind::ResolvedTypeRef(resolved);
++ resolved
++ };
+
+ // Something in the STL is trolling me. I don't need this assertion
+ // right now, but worth investigating properly once this lands.
+@@ -1008,7 +972,7 @@
+ /// closure is made.
+ fn with_loaned_item<F, T>(&mut self, id: ItemId, f: F) -> T
+ where
+- F: (FnOnce(&BindgenContext, &mut Item) -> T)
++ F: (FnOnce(&BindgenContext, &mut Item) -> T),
+ {
+ let mut item = self.items[id.0].take().unwrap();
+
+@@ -1043,7 +1007,8 @@
+ fn deanonymize_fields(&mut self) {
+ let _t = self.timer("deanonymize_fields");
+
+- let comp_item_ids: Vec<ItemId> = self.items()
++ let comp_item_ids: Vec<ItemId> = self
++ .items()
+ .filter_map(|(id, item)| {
+ if item.kind().as_type()?.is_comp() {
+ return Some(id);
+@@ -1108,7 +1073,10 @@
+ // We set this just after parsing the annotation. It's
+ // very unlikely, but this can happen.
+ if self.resolve_item_fallible(*replacement).is_some() {
+- replacements.push((id.expect_type_id(self), replacement.expect_type_id(self)));
++ replacements.push((
++ id.expect_type_id(self),
++ replacement.expect_type_id(self),
++ ));
+ }
+ }
+ }
+@@ -1157,15 +1125,14 @@
+ })
+ })
+ };
+- let old_module = old_module.expect(
+- "Every replacement item should be in a module",
+- );
++ let old_module = old_module
++ .expect("Every replacement item should be in a module");
+
+ let new_module = {
+ let immut_self = &*self;
+- new_parent.ancestors(immut_self).find(|id| {
+- immut_self.resolve_item(*id).is_module()
+- })
++ new_parent
++ .ancestors(immut_self)
++ .find(|id| immut_self.resolve_item(*id).is_module())
+ };
+ let new_module = new_module.unwrap_or(self.root_module.into());
+
+@@ -1274,26 +1241,25 @@
+
+ assert!(
+ {
+- let id = id.into_resolver()
++ let id = id
++ .into_resolver()
+ .through_type_refs()
+ .through_type_aliases()
+ .resolve(self)
+ .id();
+- id.ancestors(self).chain(Some(self.root_module.into())).any(
+- |ancestor| {
++ id.ancestors(self)
++ .chain(Some(self.root_module.into()))
++ .any(|ancestor| {
+ debug!(
+ "Checking if {:?} is a child of {:?}",
+- id,
+- ancestor
++ id, ancestor
+ );
+- self.resolve_item(ancestor).as_module().map_or(
+- false,
+- |m| {
++ self.resolve_item(ancestor)
++ .as_module()
++ .map_or(false, |m| {
+ m.children().contains(&id)
+- },
+- )
+- },
+- )
++ })
++ })
+ },
+ "{:?} should be in some ancestor module's children set",
+ id
+@@ -1377,7 +1343,10 @@
+ let mut used_params = HashMap::default();
+ for &id in self.whitelisted_items() {
+ used_params.entry(id).or_insert(
+- id.self_template_params(self).into_iter().map(|p| p.into()).collect()
++ id.self_template_params(self)
++ .into_iter()
++ .map(|p| p.into())
++ .collect(),
+ );
+ }
+ self.used_template_parameters = Some(used_params);
+@@ -1493,12 +1462,16 @@
+ ///
+ /// Panics if the id resolves to an item that is not a type.
+ pub fn safe_resolve_type(&self, type_id: TypeId) -> Option<&Type> {
+- self.resolve_item_fallible(type_id).map(|t| t.kind().expect_type())
++ self.resolve_item_fallible(type_id)
++ .map(|t| t.kind().expect_type())
+ }
+
+ /// Resolve the given `ItemId` into an `Item`, or `None` if no such item
+ /// exists.
+- pub fn resolve_item_fallible<Id: Into<ItemId>>(&self, id: Id) -> Option<&Item> {
++ pub fn resolve_item_fallible<Id: Into<ItemId>>(
++ &self,
++ id: Id,
++ ) -> Option<&Item> {
+ self.items.get(id.into().0)?.as_ref()
+ }
+
+@@ -1537,12 +1510,11 @@
+ /// Returns a known semantic parent for a given definition.
+ pub fn known_semantic_parent(
+ &self,
+- definition: clang::Cursor
++ definition: clang::Cursor,
+ ) -> Option<ItemId> {
+ self.semantic_parents.get(&definition).cloned()
+ }
+
+-
+ /// Given a cursor pointing to the location of a template instantiation,
+ /// return a tuple of the form `(declaration_cursor, declaration_id,
+ /// num_expected_template_args)`.
+@@ -1560,7 +1532,8 @@
+ .and_then(|canon_decl| {
+ self.get_resolved_type(&canon_decl).and_then(
+ |template_decl_id| {
+- let num_params = template_decl_id.num_self_template_params(self);
++ let num_params =
++ template_decl_id.num_self_template_params(self);
+ if num_params == 0 {
+ None
+ } else {
+@@ -1590,7 +1563,8 @@
+ .cloned()
+ })
+ .and_then(|template_decl| {
+- let num_template_params = template_decl.num_self_template_params(self);
++ let num_template_params =
++ template_decl.num_self_template_params(self);
+ if num_template_params == 0 {
+ None
+ } else {
+@@ -1644,11 +1618,12 @@
+ ty: &clang::Type,
+ location: clang::Cursor,
+ ) -> Option<TypeId> {
+- let num_expected_args = self.resolve_type(template).num_self_template_params(self);
++ let num_expected_args =
++ self.resolve_type(template).num_self_template_params(self);
+ if num_expected_args == 0 {
+ warn!(
+ "Tried to instantiate a template for which we could not \
+- determine any template parameters"
++ determine any template parameters"
+ );
+ return None;
+ }
+@@ -1668,13 +1643,14 @@
+ // being specialized via the `location`'s type, and if we do not
+ // filter it out, we'll add an extra layer of template instantiation
+ // on accident.
+- let idx = children.iter().position(|c| {
+- c.kind() == clang_sys::CXCursor_TemplateRef
+- });
++ let idx = children
++ .iter()
++ .position(|c| c.kind() == clang_sys::CXCursor_TemplateRef);
+ if let Some(idx) = idx {
+- if children.iter().take(idx).all(|c| {
+- c.kind() == clang_sys::CXCursor_NamespaceRef
+- })
++ if children
++ .iter()
++ .take(idx)
++ .all(|c| c.kind() == clang_sys::CXCursor_NamespaceRef)
+ {
+ children = children.into_iter().skip(idx + 1).collect();
+ }
+@@ -1701,8 +1677,13 @@
+ args.push(ty);
+ }
+ clang_sys::CXCursor_TemplateRef => {
+- let (template_decl_cursor, template_decl_id, num_expected_template_args) =
+- self.get_declaration_info_for_template_instantiation(child)?;
++ let (
++ template_decl_cursor,
++ template_decl_id,
++ num_expected_template_args,
++ ) = self.get_declaration_info_for_template_instantiation(
++ child,
++ )?;
+
+ if num_expected_template_args == 0 ||
+ child.has_at_least_num_children(
+@@ -1727,7 +1708,7 @@
+ if args_len < num_expected_template_args {
+ warn!(
+ "Found a template instantiation without \
+- enough template arguments"
++ enough template arguments"
+ );
+ return None;
+ }
+@@ -1767,7 +1748,7 @@
+ // Bypass all the validations in add_item explicitly.
+ debug!(
+ "instantiate_template: inserting nested \
+- instantiation item: {:?}",
++ instantiation item: {:?}",
+ sub_item
+ );
+ self.add_item_to_module(&sub_item);
+@@ -1795,7 +1776,7 @@
+ // situation...
+ warn!(
+ "Found template instantiated with a const value; \
+- bindgen can't handle this kind of template instantiation!"
++ bindgen can't handle this kind of template instantiation!"
+ );
+ return None;
+ }
+@@ -1803,7 +1784,7 @@
+ if args.len() != num_expected_args {
+ warn!(
+ "Found a template with an unexpected number of template \
+- arguments"
++ arguments"
+ );
+ return None;
+ }
+@@ -1845,9 +1826,9 @@
+ self.types
+ .get(&TypeKey::Declaration(*decl.cursor()))
+ .or_else(|| {
+- decl.cursor().usr().and_then(
+- |usr| self.types.get(&TypeKey::USR(usr)),
+- )
++ decl.cursor()
++ .usr()
++ .and_then(|usr| self.types.get(&TypeKey::USR(usr)))
+ })
+ .cloned()
+ }
+@@ -1864,19 +1845,14 @@
+ use clang_sys::{CXCursor_TypeAliasTemplateDecl, CXCursor_TypeRef};
+ debug!(
+ "builtin_or_resolved_ty: {:?}, {:?}, {:?}",
+- ty,
+- location,
+- parent_id
++ ty, location, parent_id
+ );
+
+ if let Some(decl) = ty.canonical_declaration(location.as_ref()) {
+ if let Some(id) = self.get_resolved_type(&decl) {
+ debug!(
+ "Already resolved ty {:?}, {:?}, {:?} {:?}",
+- id,
+- decl,
+- ty,
+- location
++ id, decl, ty, location
+ );
+ // If the declaration already exists, then either:
+ //
+@@ -1908,7 +1884,8 @@
+ return None;
+ }
+
+- return self.instantiate_template(with_id, id, ty, location)
++ return self
++ .instantiate_template(with_id, id, ty, location)
+ .or_else(|| Some(id));
+ }
+
+@@ -1935,13 +1912,7 @@
+ parent_id: Option<ItemId>,
+ ty: &clang::Type,
+ ) -> TypeId {
+- self.build_wrapper(
+- with_id,
+- wrapped_id,
+- parent_id,
+- ty,
+- ty.is_const(),
+- )
++ self.build_wrapper(with_id, wrapped_id, parent_id, ty, ty.is_const())
+ }
+
+ /// A wrapper over a type that adds a const qualifier explicitly.
+@@ -1955,11 +1926,7 @@
+ ty: &clang::Type,
+ ) -> TypeId {
+ self.build_wrapper(
+- with_id,
+- wrapped_id,
+- parent_id,
+- ty,
+- /* is_const = */ true,
++ with_id, wrapped_id, parent_id, ty, /* is_const = */ true,
+ )
+ }
+
+@@ -2001,12 +1968,8 @@
+ CXType_Bool => TypeKind::Int(IntKind::Bool),
+ CXType_Int => TypeKind::Int(IntKind::Int),
+ CXType_UInt => TypeKind::Int(IntKind::UInt),
+- CXType_Char_S => TypeKind::Int(IntKind::Char {
+- is_signed: true,
+- }),
+- CXType_Char_U => TypeKind::Int(IntKind::Char {
+- is_signed: false,
+- }),
++ CXType_Char_S => TypeKind::Int(IntKind::Char { is_signed: true }),
++ CXType_Char_U => TypeKind::Int(IntKind::Char { is_signed: false }),
+ CXType_SChar => TypeKind::Int(IntKind::SChar),
+ CXType_UChar => TypeKind::Int(IntKind::UChar),
+ CXType_Short => TypeKind::Int(IntKind::Short),
+@@ -2032,13 +1995,10 @@
+ CXType_Double => FloatKind::Double,
+ CXType_LongDouble => FloatKind::LongDouble,
+ CXType_Float128 => FloatKind::Float128,
+- _ => {
+- panic!(
+- "Non floating-type complex? {:?}, {:?}",
+- ty,
+- float_type,
+- )
+- },
++ _ => panic!(
++ "Non floating-type complex? {:?}, {:?}",
++ ty, float_type,
++ ),
+ };
+ TypeKind::Complex(float_kind)
+ }
+@@ -2050,8 +2010,13 @@
+ let layout = ty.fallible_layout(self).ok();
+ let ty = Type::new(Some(spelling), layout, type_kind, is_const);
+ let id = self.next_item_id();
+- let item =
+- Item::new(id, None, None, self.root_module.into(), ItemKind::Type(ty));
++ let item = Item::new(
++ id,
++ None,
++ None,
++ self.root_module.into(),
++ ItemKind::Type(ty),
++ );
+ self.add_builtin_item(item);
+ Some(id.as_type_id_unchecked())
+ }
+@@ -2067,7 +2032,9 @@
+ }
+
+ /// Get the currently parsed macros.
+- pub fn parsed_macros(&self) -> &StdHashMap<Vec<u8>, cexpr::expr::EvalResult> {
++ pub fn parsed_macros(
++ &self,
++ ) -> &StdHashMap<Vec<u8>, cexpr::expr::EvalResult> {
+ debug_assert!(!self.in_codegen_phase());
+ &self.parsed_macros
+ }
+@@ -2096,15 +2063,14 @@
+ Entry::Vacant(entry) => {
+ debug!(
+ "Defining replacement for {:?} as {:?}",
+- name,
+- potential_ty
++ name, potential_ty
+ );
+ entry.insert(potential_ty);
+ }
+ Entry::Occupied(occupied) => {
+ warn!(
+ "Replacement for {:?} already defined as {:?}; \
+- ignoring duplicate replacement definition as {:?}",
++ ignoring duplicate replacement definition as {:?}",
+ name,
+ occupied.get(),
+ potential_ty
+@@ -2115,7 +2081,11 @@
+
+ /// Has the item with the given `name` and `id` been replaced by another
+ /// type?
+- pub fn is_replaced_type<Id: Into<ItemId>>(&self, path: &[String], id: Id) -> bool {
++ pub fn is_replaced_type<Id: Into<ItemId>>(
++ &self,
++ path: &[String],
++ id: Id,
++ ) -> bool {
+ let id = id.into();
+ match self.replacements.get(path) {
+ Some(replaced_by) if *replaced_by != id => true,
+@@ -2185,7 +2155,8 @@
+ }
+ name if found_namespace_keyword => {
+ if module_name.is_none() {
+- module_name = Some(String::from_utf8_lossy(name).into_owned());
++ module_name =
++ Some(String::from_utf8_lossy(name).into_owned());
+ }
+ break;
+ }
+@@ -2273,7 +2244,8 @@
+ let _t = self.timer("compute_whitelisted_and_codegen_items");
+
+ let roots = {
+- let mut roots = self.items()
++ let mut roots = self
++ .items()
+ // Only consider roots that are enabled for codegen.
+ .filter(|&(_, item)| item.is_enabled_for_codegen(self))
+ .filter(|&(_, item)| {
+@@ -2281,9 +2253,10 @@
+ // game.
+ if self.options().whitelisted_types.is_empty() &&
+ self.options().whitelisted_functions.is_empty() &&
+- self.options().whitelisted_vars.is_empty() {
+- return true;
+- }
++ self.options().whitelisted_vars.is_empty()
++ {
++ return true;
++ }
+
+ // If this is a type that explicitly replaces another, we assume
+ // you know what you're doing.
+@@ -2324,7 +2297,7 @@
+ TypeKind::ResolvedTypeRef(..) |
+ TypeKind::Opaque |
+ TypeKind::TypeParam => return true,
+- _ => {},
++ _ => {}
+ };
+ }
+
+@@ -2338,7 +2311,6 @@
+ return false;
+ }
+
+-
+ let enum_ = match *ty.kind() {
+ TypeKind::Enum(ref e) => e,
+ _ => return false,
+@@ -2354,9 +2326,7 @@
+ prefix_path.push(variant.name().into());
+ let name = prefix_path[1..].join("::");
+ prefix_path.pop().unwrap();
+- self.options()
+- .whitelisted_vars
+- .matches(&name)
++ self.options().whitelisted_vars.matches(&name)
+ })
+ }
+ }
+@@ -2386,14 +2356,16 @@
+ self,
+ roots.clone(),
+ whitelisted_items_predicate,
+- ).collect::<ItemSet>();
++ )
++ .collect::<ItemSet>();
+
+ let codegen_items = if self.options().whitelist_recursively {
+ WhitelistedItemsTraversal::new(
+ self,
+ roots.clone(),
+ traversal::codegen_edges,
+- ).collect::<ItemSet>()
++ )
++ .collect::<ItemSet>()
+ } else {
+ whitelisted.clone()
+ };
+@@ -2439,7 +2411,11 @@
+ let _t = self.timer("compute_cannot_derive_debug");
+ assert!(self.cannot_derive_debug.is_none());
+ if self.options.derive_debug {
+- self.cannot_derive_debug = Some(as_cannot_derive_set(analyze::<CannotDerive>((self, DeriveTrait::Debug))));
++ self.cannot_derive_debug =
++ Some(as_cannot_derive_set(analyze::<CannotDerive>((
++ self,
++ DeriveTrait::Debug,
++ ))));
+ }
+ }
+
+@@ -2463,7 +2439,10 @@
+ assert!(self.cannot_derive_default.is_none());
+ if self.options.derive_default {
+ self.cannot_derive_default =
+- Some(as_cannot_derive_set(analyze::<CannotDerive>((self, DeriveTrait::Default))));
++ Some(as_cannot_derive_set(analyze::<CannotDerive>((
++ self,
++ DeriveTrait::Default,
++ ))));
+ }
+ }
+
+@@ -2485,7 +2464,11 @@
+ fn compute_cannot_derive_copy(&mut self) {
+ let _t = self.timer("compute_cannot_derive_copy");
+ assert!(self.cannot_derive_copy.is_none());
+- self.cannot_derive_copy = Some(as_cannot_derive_set(analyze::<CannotDerive>((self, DeriveTrait::Copy))));
++ self.cannot_derive_copy =
++ Some(as_cannot_derive_set(analyze::<CannotDerive>((
++ self,
++ DeriveTrait::Copy,
++ ))));
+ }
+
+ /// Compute whether we can derive hash.
+@@ -2493,7 +2476,11 @@
+ let _t = self.timer("compute_cannot_derive_hash");
+ assert!(self.cannot_derive_hash.is_none());
+ if self.options.derive_hash {
+- self.cannot_derive_hash = Some(as_cannot_derive_set(analyze::<CannotDerive>((self, DeriveTrait::Hash))));
++ self.cannot_derive_hash =
++ Some(as_cannot_derive_set(analyze::<CannotDerive>((
++ self,
++ DeriveTrait::Hash,
++ ))));
+ }
+ }
+
+@@ -2515,13 +2502,23 @@
+ fn compute_cannot_derive_partialord_partialeq_or_eq(&mut self) {
+ let _t = self.timer("compute_cannot_derive_partialord_partialeq_or_eq");
+ assert!(self.cannot_derive_partialeq_or_partialord.is_none());
+- if self.options.derive_partialord || self.options.derive_partialeq || self.options.derive_eq {
+- self.cannot_derive_partialeq_or_partialord = Some(analyze::<CannotDerive>((self, DeriveTrait::PartialEqOrPartialOrd)));
++ if self.options.derive_partialord ||
++ self.options.derive_partialeq ||
++ self.options.derive_eq
++ {
++ self.cannot_derive_partialeq_or_partialord =
++ Some(analyze::<CannotDerive>((
++ self,
++ DeriveTrait::PartialEqOrPartialOrd,
++ )));
+ }
+ }
+
+ /// Look up whether the item with `id` can derive `Partial{Eq,Ord}`.
+- pub fn lookup_can_derive_partialeq_or_partialord<Id: Into<ItemId>>(&self, id: Id) -> CanDerive {
++ pub fn lookup_can_derive_partialeq_or_partialord<Id: Into<ItemId>>(
++ &self,
++ id: Id,
++ ) -> CanDerive {
+ let id = id.into();
+ assert!(
+ self.in_codegen_phase(),
+@@ -2530,7 +2527,8 @@
+
+ // Look up the computed value for whether the item with `id` can
+ // derive partialeq or not.
+- self.cannot_derive_partialeq_or_partialord.as_ref()
++ self.cannot_derive_partialeq_or_partialord
++ .as_ref()
+ .unwrap()
+ .get(&id)
+ .cloned()
+@@ -2561,7 +2559,10 @@
+ }
+
+ /// Look up whether the item with `id` has type parameter in array or not.
+- pub fn lookup_has_type_param_in_array<Id: Into<ItemId>>(&self, id: Id) -> bool {
++ pub fn lookup_has_type_param_in_array<Id: Into<ItemId>>(
++ &self,
++ id: Id,
++ ) -> bool {
+ assert!(
+ self.in_codegen_phase(),
+ "We only compute has array when we enter codegen"
+@@ -2569,7 +2570,10 @@
+
+ // Look up the computed value for whether the item with `id` has
+ // type parameter in array or not.
+- self.has_type_param_in_array.as_ref().unwrap().contains(&id.into())
++ self.has_type_param_in_array
++ .as_ref()
++ .unwrap()
++ .contains(&id.into())
+ }
+
+ /// Compute whether the type has float.
+@@ -2583,8 +2587,10 @@
+
+ /// Look up whether the item with `id` has array or not.
+ pub fn lookup_has_float<Id: Into<ItemId>>(&self, id: Id) -> bool {
+- assert!(self.in_codegen_phase(),
+- "We only compute has float when we enter codegen");
++ assert!(
++ self.in_codegen_phase(),
++ "We only compute has float when we enter codegen"
++ );
+
+ // Look up the computed value for whether the item with `id` has
+ // float or not.
+@@ -2627,7 +2633,7 @@
+
+ impl<T> From<T> for ItemResolver
+ where
+- T: Into<ItemId>
++ T: Into<ItemId>,
+ {
+ fn from(id: T) -> ItemResolver {
+ ItemResolver::new(id)
+@@ -2667,14 +2673,16 @@
+ let ty_kind = item.as_type().map(|t| t.kind());
+ match ty_kind {
+ Some(&TypeKind::ResolvedTypeRef(next_id))
+- if self.through_type_refs => {
++ if self.through_type_refs =>
++ {
+ id = next_id.into();
+ }
+ // We intentionally ignore template aliases here, as they are
+ // more complicated, and don't represent a simple renaming of
+ // some type.
+ Some(&TypeKind::Alias(next_id))
+- if self.through_type_aliases => {
++ if self.through_type_aliases =>
++ {
+ id = next_id.into();
+ }
+ _ => return item,
+@@ -2696,10 +2704,7 @@
+ /// Construct a new `PartialType`.
+ pub fn new(decl: Cursor, id: ItemId) -> PartialType {
+ // assert!(decl == decl.canonical());
+- PartialType {
+- decl: decl,
+- id: id,
+- }
++ PartialType { decl: decl, id: id }
+ }
+
+ /// The cursor pointing to this partial type's declaration location.
+@@ -2715,10 +2720,7 @@
+ }
+
+ impl TemplateParameters for PartialType {
+- fn self_template_params(
+- &self,
+- _ctx: &BindgenContext,
+- ) -> Vec<TypeId> {
++ fn self_template_params(&self, _ctx: &BindgenContext) -> Vec<TypeId> {
+ // Maybe at some point we will eagerly parse named types, but for now we
+ // don't and this information is unavailable.
+ vec![]
+diff --git a/third_party/rust/bindgen/src/ir/derive.rs b/third_party/rust/bindgen/src/ir/derive.rs
+--- a/third_party/rust/bindgen/src/ir/derive.rs
++++ b/third_party/rust/bindgen/src/ir/derive.rs
+@@ -92,10 +92,10 @@
+ ///
+ /// Initially we assume that we can derive trait for all types and then
+ /// update our understanding as we learn more about each type.
+-#[derive(Debug, Copy, Clone, PartialEq, Eq, Ord)]
++#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+ pub enum CanDerive {
+- /// No, we cannot.
+- No,
++ /// Yes, we can derive automatically.
++ Yes,
+
+ /// The only thing that stops us from automatically deriving is that
+ /// array with more than maximum number of elements is used.
+@@ -103,8 +103,8 @@
+ /// This means we probably can "manually" implement such trait.
+ Manually,
+
+- /// Yes, we can derive automatically.
+- Yes,
++ /// No, we cannot.
++ No,
+ }
+
+ impl Default for CanDerive {
+@@ -113,22 +113,6 @@
+ }
+ }
+
+-impl cmp::PartialOrd for CanDerive {
+- fn partial_cmp(&self, rhs: &Self) -> Option<cmp::Ordering> {
+- use self::CanDerive::*;
+-
+- let ordering = match (*self, *rhs) {
+- (x, y) if x == y => cmp::Ordering::Equal,
+- (No, _) => cmp::Ordering::Greater,
+- (_, No) => cmp::Ordering::Less,
+- (Manually, _) => cmp::Ordering::Greater,
+- (_, Manually) => cmp::Ordering::Less,
+- _ => unreachable!()
+- };
+- Some(ordering)
+- }
+-}
+-
+ impl CanDerive {
+ /// Take the least upper bound of `self` and `rhs`.
+ pub fn join(self, rhs: Self) -> Self {
+diff --git a/third_party/rust/bindgen/src/ir/dot.rs b/third_party/rust/bindgen/src/ir/dot.rs
+--- a/third_party/rust/bindgen/src/ir/dot.rs
++++ b/third_party/rust/bindgen/src/ir/dot.rs
+@@ -60,11 +60,7 @@
+ id.as_usize(),
+ sub_id.as_usize(),
+ edge_kind,
+- if is_whitelisted {
+- "black"
+- } else {
+- "gray"
+- }
++ if is_whitelisted { "black" } else { "gray" }
+ ) {
+ Ok(_) => {}
+ Err(e) => err = Some(Err(e)),
+diff --git a/third_party/rust/bindgen/src/ir/enum_ty.rs b/third_party/rust/bindgen/src/ir/enum_ty.rs
+--- a/third_party/rust/bindgen/src/ir/enum_ty.rs
++++ b/third_party/rust/bindgen/src/ir/enum_ty.rs
+@@ -1,8 +1,8 @@
+ //! Intermediate representation for C/C++ enumerations.
+
++use super::super::codegen::EnumVariation;
+ use super::context::{BindgenContext, TypeId};
+ use super::item::Item;
+-use super::super::codegen::EnumVariation;
+ use super::ty::TypeKind;
+ use clang;
+ use ir::annotations::Annotations;
+@@ -38,10 +38,7 @@
+ impl Enum {
+ /// Construct a new `Enum` with the given representation and variants.
+ pub fn new(repr: Option<TypeId>, variants: Vec<EnumVariant>) -> Self {
+- Enum {
+- repr,
+- variants,
+- }
++ Enum { repr, variants }
+ }
+
+ /// Get this enumeration's representation.
+@@ -67,15 +64,15 @@
+ }
+
+ let declaration = ty.declaration().canonical();
+- let repr = declaration.enum_type().and_then(|et| {
+- Item::from_ty(&et, declaration, None, ctx).ok()
+- });
++ let repr = declaration
++ .enum_type()
++ .and_then(|et| Item::from_ty(&et, declaration, None, ctx).ok());
+ let mut variants = vec![];
+
+ // Assume signedness since the default type by the C standard is an int.
+- let is_signed = repr.and_then(
+- |r| ctx.resolve_type(r).safe_canonical_type(ctx),
+- ).map_or(true, |ty| match *ty.kind() {
++ let is_signed = repr
++ .and_then(|r| ctx.resolve_type(r).safe_canonical_type(ctx))
++ .map_or(true, |ty| match *ty.kind() {
+ TypeKind::Int(ref int_kind) => int_kind.is_signed(),
+ ref other => {
+ panic!("Since when enums can be non-integers? {:?}", other)
+@@ -101,9 +98,11 @@
+ if let Some(val) = value {
+ let name = cursor.spelling();
+ let annotations = Annotations::new(&cursor);
+- let custom_behavior = ctx.parse_callbacks()
++ let custom_behavior = ctx
++ .parse_callbacks()
+ .and_then(|callbacks| {
+- callbacks.enum_variant_behavior(type_name, &name, val)
++ callbacks
++ .enum_variant_behavior(type_name, &name, val)
+ })
+ .or_else(|| {
+ let annotations = annotations.as_ref()?;
+@@ -116,12 +115,17 @@
+ }
+ });
+
+- let name = ctx.parse_callbacks()
++ let name = ctx
++ .parse_callbacks()
+ .and_then(|callbacks| {
+ callbacks.enum_variant_name(type_name, &name, val)
+ })
+ .or_else(|| {
+- annotations.as_ref()?.use_instead_of()?.last().cloned()
++ annotations
++ .as_ref()?
++ .use_instead_of()?
++ .last()
++ .cloned()
+ })
+ .unwrap_or(name);
+
+@@ -139,7 +143,12 @@
+ Ok(Enum::new(repr, variants))
+ }
+
+- fn is_matching_enum(&self, ctx: &BindgenContext, enums: &RegexSet, item: &Item) -> bool {
++ fn is_matching_enum(
++ &self,
++ ctx: &BindgenContext,
++ enums: &RegexSet,
++ item: &Item,
++ ) -> bool {
+ let path = item.canonical_path(ctx);
+ let enum_ty = item.expect_type();
+
+@@ -156,18 +165,46 @@
+ }
+
+ /// Returns the final representation of the enum.
+- pub fn computed_enum_variation(&self, ctx: &BindgenContext, item: &Item) -> EnumVariation {
++ pub fn computed_enum_variation(
++ &self,
++ ctx: &BindgenContext,
++ item: &Item,
++ ) -> EnumVariation {
+ // ModuleConsts has higher precedence before Rust in order to avoid
+ // problems with overlapping match patterns.
+- if self.is_matching_enum(ctx, &ctx.options().constified_enum_modules, item) {
++ if self.is_matching_enum(
++ ctx,
++ &ctx.options().constified_enum_modules,
++ item,
++ ) {
+ EnumVariation::ModuleConsts
+- } else if self.is_matching_enum(ctx, &ctx.options().bitfield_enums, item) {
++ } else if self.is_matching_enum(
++ ctx,
++ &ctx.options().bitfield_enums,
++ item,
++ ) {
+ EnumVariation::Bitfield
+- } else if self.is_matching_enum(ctx, &ctx.options().rustified_enums, item) {
+- EnumVariation::Rust { non_exhaustive: false }
+- } else if self.is_matching_enum(ctx, &ctx.options().rustified_non_exhaustive_enums, item) {
+- EnumVariation::Rust { non_exhaustive: true }
+- } else if self.is_matching_enum(ctx, &ctx.options().constified_enums, item) {
++ } else if self.is_matching_enum(
++ ctx,
++ &ctx.options().rustified_enums,
++ item,
++ ) {
++ EnumVariation::Rust {
++ non_exhaustive: false,
++ }
++ } else if self.is_matching_enum(
++ ctx,
++ &ctx.options().rustified_non_exhaustive_enums,
++ item,
++ ) {
++ EnumVariation::Rust {
++ non_exhaustive: true,
++ }
++ } else if self.is_matching_enum(
++ ctx,
++ &ctx.options().constified_enums,
++ item,
++ ) {
+ EnumVariation::Consts
+ } else {
+ ctx.options().default_enum_style
+@@ -235,16 +272,14 @@
+ /// Returns whether this variant should be enforced to be a constant by code
+ /// generation.
+ pub fn force_constification(&self) -> bool {
+- self.custom_behavior.map_or(false, |b| {
+- b == EnumVariantCustomBehavior::Constify
+- })
++ self.custom_behavior
++ .map_or(false, |b| b == EnumVariantCustomBehavior::Constify)
+ }
+
+ /// Returns whether the current variant should be hidden completely from the
+ /// resulting rust enum.
+ pub fn hidden(&self) -> bool {
+- self.custom_behavior.map_or(false, |b| {
+- b == EnumVariantCustomBehavior::Hide
+- })
++ self.custom_behavior
++ .map_or(false, |b| b == EnumVariantCustomBehavior::Hide)
+ }
+ }
+diff --git a/third_party/rust/bindgen/src/ir/function.rs b/third_party/rust/bindgen/src/ir/function.rs
+--- a/third_party/rust/bindgen/src/ir/function.rs
++++ b/third_party/rust/bindgen/src/ir/function.rs
+@@ -9,9 +9,9 @@
+ use clang;
+ use clang_sys::{self, CXCallingConv};
+ use parse::{ClangItemParser, ClangSubItemParser, ParseError, ParseResult};
++use proc_macro2;
+ use quote;
+ use quote::TokenStreamExt;
+-use proc_macro2;
+ use std::io;
+
+ const RUST_DERIVE_FUNPTR_LIMIT: usize = 12;
+@@ -30,18 +30,18 @@
+ // FIXME(emilio): Deduplicate logic with `ir::comp`.
+ Some(match cursor.kind() {
+ clang_sys::CXCursor_FunctionDecl => FunctionKind::Function,
+- clang_sys::CXCursor_Constructor => FunctionKind::Method(
+- MethodKind::Constructor,
+- ),
+- clang_sys::CXCursor_Destructor => FunctionKind::Method(
+- if cursor.method_is_virtual() {
++ clang_sys::CXCursor_Constructor => {
++ FunctionKind::Method(MethodKind::Constructor)
++ }
++ clang_sys::CXCursor_Destructor => {
++ FunctionKind::Method(if cursor.method_is_virtual() {
+ MethodKind::VirtualDestructor {
+ pure_virtual: cursor.method_is_pure_virtual(),
+ }
+ } else {
+ MethodKind::Destructor
+- }
+- ),
++ })
++ }
+ clang_sys::CXCursor_CXXMethod => {
+ if cursor.method_is_virtual() {
+ FunctionKind::Method(MethodKind::Virtual {
+@@ -64,7 +64,7 @@
+ /// Externally visible and can be linked against
+ External,
+ /// Not exposed externally. 'static inline' functions will have this kind of linkage
+- Internal
++ Internal,
+ }
+
+ /// A function declaration, with a signature, arguments, and argument names.
+@@ -100,7 +100,7 @@
+ signature: TypeId,
+ comment: Option<String>,
+ kind: FunctionKind,
+- linkage: Linkage
++ linkage: Linkage,
+ ) -> Self {
+ Function {
+ name,
+@@ -136,7 +136,6 @@
+ pub fn linkage(&self) -> Linkage {
+ self.linkage
+ }
+-
+ }
+
+ impl DotAttributes for Function {
+@@ -325,16 +324,16 @@
+ cursor_args
+ .map(Some)
+ .chain(std::iter::repeat(None))
+- .zip(
+- type_args
+- .map(Some)
+- .chain(std::iter::repeat(None))
+- )
++ .zip(type_args.map(Some).chain(std::iter::repeat(None)))
+ .take_while(|(cur, ty)| cur.is_some() || ty.is_some())
+ .map(|(arg_cur, arg_ty)| {
+- let name = arg_cur
+- .map(|a| a.spelling())
+- .and_then(|name| if name.is_empty() { None} else { Some(name) });
++ let name = arg_cur.map(|a| a.spelling()).and_then(|name| {
++ if name.is_empty() {
++ None
++ } else {
++ Some(name)
++ }
++ });
+
+ let cursor = arg_cur.unwrap_or(*cursor);
+ let ty = arg_ty.unwrap_or(cursor.cur_type());
+@@ -404,7 +403,7 @@
+ CXCursor_ObjCInstanceMethodDecl |
+ CXCursor_ObjCClassMethodDecl => {
+ args_from_ty_and_cursor(&ty, &cursor, ctx)
+- },
++ }
+ _ => {
+ // For non-CXCursor_FunctionDecl, visiting the cursor's children
+ // is the only reliable way to get parameter names.
+@@ -424,8 +423,7 @@
+ }
+ };
+
+- let must_use =
+- ctx.options().enable_function_attribute_detection &&
++ let must_use = ctx.options().enable_function_attribute_detection &&
+ cursor.has_simple_attr("warn_unused_result");
+ let is_method = kind == CXCursor_CXXMethod;
+ let is_constructor = kind == CXCursor_Constructor;
+@@ -475,9 +473,9 @@
+ let ty_ret_type = if kind == CXCursor_ObjCInstanceMethodDecl ||
+ kind == CXCursor_ObjCClassMethodDecl
+ {
+- ty.ret_type().or_else(|| cursor.ret_type()).ok_or(
+- ParseError::Continue,
+- )?
++ ty.ret_type()
++ .or_else(|| cursor.ret_type())
++ .ok_or(ParseError::Continue)?
+ } else {
+ ty.ret_type().ok_or(ParseError::Continue)?
+ };
+@@ -583,12 +581,11 @@
+ let linkage = match linkage {
+ CXLinkage_External | CXLinkage_UniqueExternal => Linkage::External,
+ CXLinkage_Internal => Linkage::Internal,
+- _ => return Err(ParseError::Continue)
++ _ => return Err(ParseError::Continue),
+ };
+
+ // Grab the signature using Item::from_ty.
+- let sig =
+- Item::from_ty(&cursor.cur_type(), cursor, None, context)?;
++ let sig = Item::from_ty(&cursor.cur_type(), cursor, None, context)?;
+
+ let mut name = cursor.spelling();
+ assert!(!name.is_empty(), "Empty function name?");
+@@ -610,7 +607,8 @@
+ let mangled_name = cursor_mangling(context, &cursor);
+ let comment = cursor.raw_comment();
+
+- let function = Self::new(name, mangled_name, sig, comment, kind, linkage);
++ let function =
++ Self::new(name, mangled_name, sig, comment, kind, linkage);
+ Ok(ParseResult::New(function, Some(cursor)))
+ }
+ }
+diff --git a/third_party/rust/bindgen/src/ir/int.rs b/third_party/rust/bindgen/src/ir/int.rs
+--- a/third_party/rust/bindgen/src/ir/int.rs
++++ b/third_party/rust/bindgen/src/ir/int.rs
+@@ -101,9 +101,7 @@
+
+ Char { is_signed } => is_signed,
+
+- Custom {
+- is_signed, ..
+- } => is_signed,
++ Custom { is_signed, .. } => is_signed,
+ }
+ }
+
+@@ -113,14 +111,7 @@
+ pub fn known_size(&self) -> Option<usize> {
+ use self::IntKind::*;
+ Some(match *self {
+- Bool |
+- UChar |
+- SChar |
+- U8 |
+- I8 |
+- Char {
+- ..
+- } => 1,
++ Bool | UChar | SChar | U8 | I8 | Char { .. } => 1,
+ U16 | I16 => 2,
+ U32 | I32 => 4,
+ U64 | I64 => 8,
+diff --git a/third_party/rust/bindgen/src/ir/item.rs b/third_party/rust/bindgen/src/ir/item.rs
+--- a/third_party/rust/bindgen/src/ir/item.rs
++++ b/third_party/rust/bindgen/src/ir/item.rs
+@@ -1,19 +1,20 @@
+ //! Bindgen's core intermediate representation type.
+
++use super::super::codegen::{EnumVariation, CONSTIFIED_ENUM_MODULE_REPR_NAME};
+ use super::analysis::{HasVtable, HasVtableResult, Sizedness, SizednessResult};
+ use super::annotations::Annotations;
+ use super::comment;
+ use super::comp::MethodKind;
+ use super::context::{BindgenContext, ItemId, PartialType, TypeId};
+-use super::derive::{CanDeriveCopy, CanDeriveDebug, CanDeriveDefault,
+- CanDeriveHash, CanDerivePartialOrd, CanDeriveOrd,
+- CanDerivePartialEq, CanDeriveEq};
++use super::derive::{
++ CanDeriveCopy, CanDeriveDebug, CanDeriveDefault, CanDeriveEq,
++ CanDeriveHash, CanDeriveOrd, CanDerivePartialEq, CanDerivePartialOrd,
++};
+ use super::dot::DotAttributes;
+ use super::function::{Function, FunctionKind};
+ use super::item_kind::ItemKind;
+ use super::layout::Opaque;
+ use super::module::Module;
+-use super::super::codegen::{CONSTIFIED_ENUM_MODULE_REPR_NAME, EnumVariation};
+ use super::template::{AsTemplateParam, TemplateParameters};
+ use super::traversal::{EdgeKind, Trace, Tracer};
+ use super::ty::{Type, TypeKind};
+@@ -94,10 +95,7 @@
+ /// up to (but not including) the implicit root module.
+ pub trait ItemAncestors {
+ /// Get an iterable over this item's ancestors.
+- fn ancestors<'a>(
+- &self,
+- ctx: &'a BindgenContext,
+- ) -> ItemAncestorsIter<'a>;
++ fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a>;
+ }
+
+ cfg_if! {
+@@ -158,7 +156,8 @@
+
+ impl<T> AsTemplateParam for T
+ where
+- T: Copy + Into<ItemId> {
++ T: Copy + Into<ItemId>,
++{
+ type Extra = ();
+
+ fn as_template_param(
+@@ -201,7 +200,7 @@
+
+ impl<T> ItemCanonicalName for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn canonical_name(&self, ctx: &BindgenContext) -> String {
+ debug_assert!(
+@@ -213,8 +212,8 @@
+ }
+
+ impl<T> ItemCanonicalPath for T
+- where
+- T: Copy + Into<ItemId>
++where
++ T: Copy + Into<ItemId>,
+ {
+ fn namespace_aware_canonical_path(
+ &self,
+@@ -238,28 +237,22 @@
+
+ impl<T> ItemAncestors for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+- fn ancestors<'a>(
+- &self,
+- ctx: &'a BindgenContext,
+- ) -> ItemAncestorsIter<'a> {
++ fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a> {
+ ItemAncestorsIter::new(ctx, *self)
+ }
+ }
+
+ impl ItemAncestors for Item {
+- fn ancestors<'a>(
+- &self,
+- ctx: &'a BindgenContext,
+- ) -> ItemAncestorsIter<'a> {
++ fn ancestors<'a>(&self, ctx: &'a BindgenContext) -> ItemAncestorsIter<'a> {
+ self.id().ancestors(ctx)
+ }
+ }
+
+ impl<Id> Trace for Id
+ where
+- Id: Copy + Into<ItemId>
++ Id: Copy + Into<ItemId>,
+ {
+ type Extra = ();
+
+@@ -495,10 +488,10 @@
+ ctx.options().conservative_inline_namespaces
+ })
+ })
+- .count() + 1
++ .count() +
++ 1
+ }
+
+-
+ /// Get this `Item`'s comment, if it has any, already preprocessed and with
+ /// the right indentation.
+ pub fn comment(&self, ctx: &BindgenContext) -> Option<String> {
+@@ -567,7 +560,8 @@
+ pub fn is_toplevel(&self, ctx: &BindgenContext) -> bool {
+ // FIXME: Workaround for some types falling behind when parsing weird
+ // stl classes, for example.
+- if ctx.options().enable_cxx_namespaces && self.kind().is_module() &&
++ if ctx.options().enable_cxx_namespaces &&
++ self.kind().is_module() &&
+ self.id() != ctx.root_module()
+ {
+ return false;
+@@ -583,7 +577,7 @@
+ if parent_item.id() == ctx.root_module() {
+ return true;
+ } else if ctx.options().enable_cxx_namespaces ||
+- !parent_item.kind().is_module()
++ !parent_item.kind().is_module()
+ {
+ return false;
+ }
+@@ -638,18 +632,17 @@
+ let path = self.path_for_whitelisting(ctx);
+ let name = path[1..].join("::");
+ ctx.options().blacklisted_items.matches(&name) ||
+- match self.kind {
+- ItemKind::Type(..) => {
+- ctx.options().blacklisted_types.matches(&name) ||
+- ctx.is_replaced_type(&path, self.id)
++ match self.kind {
++ ItemKind::Type(..) => {
++ ctx.options().blacklisted_types.matches(&name) ||
++ ctx.is_replaced_type(&path, self.id)
++ }
++ ItemKind::Function(..) => {
++ ctx.options().blacklisted_functions.matches(&name)
++ }
++ // TODO: Add constant / namespace blacklisting?
++ ItemKind::Var(..) | ItemKind::Module(..) => false,
+ }
+- ItemKind::Function(..) => {
+- ctx.options().blacklisted_functions.matches(&name)
+- }
+- // TODO: Add constant / namespace blacklisting?
+- ItemKind::Var(..) |
+- ItemKind::Module(..) => false,
+- }
+ }
+
+ /// Is this a reference to another type?
+@@ -666,10 +659,7 @@
+ }
+
+ /// Take out item NameOptions
+- pub fn name<'a>(
+- &'a self,
+- ctx: &'a BindgenContext,
+- ) -> NameOptions<'a> {
++ pub fn name<'a>(&'a self, ctx: &'a BindgenContext) -> NameOptions<'a> {
+ NameOptions::new(self, ctx)
+ }
+
+@@ -687,17 +677,15 @@
+ }
+
+ match *item.kind() {
+- ItemKind::Type(ref ty) => {
+- match *ty.kind() {
+- TypeKind::ResolvedTypeRef(inner) => {
+- item = ctx.resolve_item(inner);
+- }
+- TypeKind::TemplateInstantiation(ref inst) => {
+- item = ctx.resolve_item(inst.template_definition());
+- }
+- _ => return item.id(),
++ ItemKind::Type(ref ty) => match *ty.kind() {
++ TypeKind::ResolvedTypeRef(inner) => {
++ item = ctx.resolve_item(inner);
+ }
+- }
++ TypeKind::TemplateInstantiation(ref inst) => {
++ item = ctx.resolve_item(inst.template_definition());
++ }
++ _ => return item.id(),
++ },
+ _ => return item.id(),
+ }
+ }
+@@ -752,7 +740,8 @@
+ if let TypeKind::Comp(ref ci) = *ty.kind() {
+ // All the constructors have the same name, so no need to
+ // resolve and check.
+- return ci.constructors()
++ return ci
++ .constructors()
+ .iter()
+ .position(|c| *c == self.id())
+ .or_else(|| {
+@@ -937,7 +926,8 @@
+
+ match *type_.kind() {
+ TypeKind::Enum(ref enum_) => {
+- enum_.computed_enum_variation(ctx, self) == EnumVariation::ModuleConsts
++ enum_.computed_enum_variation(ctx, self) ==
++ EnumVariation::ModuleConsts
+ }
+ TypeKind::Alias(inner_id) => {
+ // TODO(emilio): Make this "hop through type aliases that aren't
+@@ -962,17 +952,21 @@
+ ItemKind::Module(..) => true,
+ ItemKind::Var(_) => cc.vars(),
+ ItemKind::Type(_) => cc.types(),
+- ItemKind::Function(ref f) => {
+- match f.kind() {
+- FunctionKind::Function => cc.functions(),
+- FunctionKind::Method(MethodKind::Constructor) => cc.constructors(),
+- FunctionKind::Method(MethodKind::Destructor) |
+- FunctionKind::Method(MethodKind::VirtualDestructor { .. }) => cc.destructors(),
+- FunctionKind::Method(MethodKind::Static) |
+- FunctionKind::Method(MethodKind::Normal) |
+- FunctionKind::Method(MethodKind::Virtual { .. }) => cc.methods(),
++ ItemKind::Function(ref f) => match f.kind() {
++ FunctionKind::Function => cc.functions(),
++ FunctionKind::Method(MethodKind::Constructor) => {
++ cc.constructors()
+ }
+- }
++ FunctionKind::Method(MethodKind::Destructor) |
++ FunctionKind::Method(MethodKind::VirtualDestructor {
++ ..
++ }) => cc.destructors(),
++ FunctionKind::Method(MethodKind::Static) |
++ FunctionKind::Method(MethodKind::Normal) |
++ FunctionKind::Method(MethodKind::Virtual { .. }) => {
++ cc.methods()
++ }
++ },
+ }
+ }
+
+@@ -982,7 +976,11 @@
+ self.compute_path(ctx, UserMangled::No)
+ }
+
+- fn compute_path(&self, ctx: &BindgenContext, mangled: UserMangled) -> Vec<String> {
++ fn compute_path(
++ &self,
++ ctx: &BindgenContext,
++ mangled: UserMangled,
++ ) -> Vec<String> {
+ if let Some(path) = self.annotations().use_instead_of() {
+ let mut ret =
+ vec![ctx.resolve_item(ctx.root_module()).name(ctx).get()];
+@@ -1017,7 +1015,7 @@
+
+ impl<T> IsOpaque for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ type Extra = ();
+
+@@ -1046,7 +1044,7 @@
+
+ impl<T> HasVtable for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn has_vtable(&self, ctx: &BindgenContext) -> bool {
+ let id: ItemId = (*self).into();
+@@ -1079,7 +1077,7 @@
+
+ impl<T> Sizedness for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn sizedness(&self, ctx: &BindgenContext) -> SizednessResult {
+ let id: ItemId = (*self).into();
+@@ -1096,7 +1094,7 @@
+
+ impl<T> HasTypeParamInArray for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn has_type_param_in_array(&self, ctx: &BindgenContext) -> bool {
+ debug_assert!(
+@@ -1119,19 +1117,23 @@
+
+ impl<T> HasFloat for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+ fn has_float(&self, ctx: &BindgenContext) -> bool {
+- debug_assert!(ctx.in_codegen_phase(),
+- "You're not supposed to call this yet");
++ debug_assert!(
++ ctx.in_codegen_phase(),
++ "You're not supposed to call this yet"
++ );
+ ctx.lookup_has_float(*self)
+ }
+ }
+
+ impl HasFloat for Item {
+ fn has_float(&self, ctx: &BindgenContext) -> bool {
+- debug_assert!(ctx.in_codegen_phase(),
+- "You're not supposed to call this yet");
++ debug_assert!(
++ ctx.in_codegen_phase(),
++ "You're not supposed to call this yet"
++ );
+ ctx.lookup_has_float(self.id())
+ }
+ }
+@@ -1166,40 +1168,30 @@
+
+ impl<T> TemplateParameters for T
+ where
+- T: Copy + Into<ItemId>
++ T: Copy + Into<ItemId>,
+ {
+- fn self_template_params(
+- &self,
+- ctx: &BindgenContext,
+- ) -> Vec<TypeId> {
+- ctx.resolve_item_fallible(*self).map_or(vec![], |item| {
+- item.self_template_params(ctx)
+- })
++ fn self_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId> {
++ ctx.resolve_item_fallible(*self)
++ .map_or(vec![], |item| item.self_template_params(ctx))
+ }
+ }
+
+ impl TemplateParameters for Item {
+- fn self_template_params(
+- &self,
+- ctx: &BindgenContext,
+- ) -> Vec<TypeId> {
++ fn self_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId> {
+ self.kind.self_template_params(ctx)
+ }
+ }
+
+ impl TemplateParameters for ItemKind {
+- fn self_template_params(
+- &self,
+- ctx: &BindgenContext,
+- ) -> Vec<TypeId> {
++ fn self_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId> {
+ match *self {
+ ItemKind::Type(ref ty) => ty.self_template_params(ctx),
+ // If we start emitting bindings to explicitly instantiated
+ // functions, then we'll need to check ItemKind::Function for
+ // template params.
+- ItemKind::Function(_) |
+- ItemKind::Module(_) |
+- ItemKind::Var(_) => vec![],
++ ItemKind::Function(_) | ItemKind::Module(_) | ItemKind::Var(_) => {
++ vec![]
++ }
+ }
+ }
+ }
+@@ -1256,14 +1248,13 @@
+ id.as_type_id_unchecked()
+ }
+
+-
+ fn parse(
+ cursor: clang::Cursor,
+ parent_id: Option<ItemId>,
+ ctx: &mut BindgenContext,
+ ) -> Result<ItemId, ParseError> {
++ use clang_sys::*;
+ use ir::var::Var;
+- use clang_sys::*;
+
+ if !cursor.is_valid() {
+ return Err(ParseError::Continue);
+@@ -1281,20 +1272,28 @@
+ Ok(ParseResult::New(item, declaration)) => {
+ let id = ctx.next_item_id();
+
+- ctx.add_item(Item::new(id, comment, annotations,
+- relevant_parent_id,
+- ItemKind::$what(item)),
+- declaration,
+- Some(cursor));
++ ctx.add_item(
++ Item::new(
++ id,
++ comment,
++ annotations,
++ relevant_parent_id,
++ ItemKind::$what(item),
++ ),
++ declaration,
++ Some(cursor),
++ );
+ return Ok(id);
+ }
+ Ok(ParseResult::AlreadyResolved(id)) => {
+ return Ok(id);
+ }
+- Err(ParseError::Recurse) => return Err(ParseError::Recurse),
+- Err(ParseError::Continue) => {},
++ Err(ParseError::Recurse) => {
++ return Err(ParseError::Recurse)
++ }
++ Err(ParseError::Continue) => {}
+ }
+- }
++ };
+ }
+
+ try_parse!(Module);
+@@ -1315,7 +1314,6 @@
+ let definition = cursor.definition();
+ let applicable_cursor = definition.unwrap_or(cursor);
+
+-
+ let relevant_parent_id = match definition {
+ Some(definition) => {
+ if definition != cursor {
+@@ -1325,7 +1323,8 @@
+ cursor,
+ parent_id,
+ ctx,
+- ).into());
++ )
++ .into());
+ }
+ ctx.known_semantic_parent(definition)
+ .or(parent_id)
+@@ -1412,10 +1411,7 @@
+ ) -> TypeId {
+ debug!(
+ "from_ty_or_ref_with_id: {:?} {:?}, {:?}, {:?}",
+- potential_id,
+- ty,
+- location,
+- parent_id
++ potential_id, ty, location, parent_id
+ );
+
+ if ctx.collected_typerefs() {
+@@ -1426,9 +1422,8 @@
+ location,
+ parent_id,
+ ctx,
+- ).unwrap_or_else(
+- |_| Item::new_opaque_type(potential_id, &ty, ctx),
+- );
++ )
++ .unwrap_or_else(|_| Item::new_opaque_type(potential_id, &ty, ctx));
+ }
+
+ if let Some(ty) = ctx.builtin_or_resolved_ty(
+@@ -1436,8 +1431,7 @@
+ parent_id,
+ &ty,
+ Some(location),
+- )
+- {
++ ) {
+ debug!("{:?} already resolved: {:?}", ty, location);
+ return ty;
+ }
+@@ -1491,17 +1485,14 @@
+
+ debug!(
+ "Item::from_ty_with_id: {:?}\n\
+- \tty = {:?},\n\
+- \tlocation = {:?}",
+- id,
+- ty,
+- location
++ \tty = {:?},\n\
++ \tlocation = {:?}",
++ id, ty, location
+ );
+
+ if ty.kind() == clang_sys::CXType_Unexposed ||
+ location.cur_type().kind() == clang_sys::CXType_Unexposed
+ {
+-
+ if ty.is_associated_type() ||
+ location.cur_type().is_associated_type()
+ {
+@@ -1528,12 +1519,8 @@
+ }
+ }
+
+- if let Some(ty) = ctx.builtin_or_resolved_ty(
+- id,
+- parent_id,
+- ty,
+- Some(location),
+- )
++ if let Some(ty) =
++ ctx.builtin_or_resolved_ty(id, parent_id, ty, Some(location))
+ {
+ return Ok(ty);
+ }
+@@ -1550,11 +1537,10 @@
+ };
+
+ if valid_decl {
+- if let Some(partial) = ctx.currently_parsed_types().iter().find(
+- |ty| {
+- *ty.decl() == declaration_to_look_for
+- },
+- )
++ if let Some(partial) = ctx
++ .currently_parsed_types()
++ .iter()
++ .find(|ty| *ty.decl() == declaration_to_look_for)
+ {
+ debug!("Avoiding recursion parsing type: {:?}", ty);
+ // Unchecked because we haven't finished this type yet.
+@@ -1571,7 +1557,9 @@
+ let result = Type::from_clang_ty(id, ty, location, parent_id, ctx);
+ let relevant_parent_id = parent_id.unwrap_or(current_module);
+ let ret = match result {
+- Ok(ParseResult::AlreadyResolved(ty)) => Ok(ty.as_type_id_unchecked()),
++ Ok(ParseResult::AlreadyResolved(ty)) => {
++ Ok(ty.as_type_id_unchecked())
++ }
+ Ok(ParseResult::New(item, declaration)) => {
+ ctx.add_item(
+ Item::new(
+@@ -1619,7 +1607,7 @@
+ if let Err(ParseError::Recurse) = result {
+ warn!(
+ "Unknown type, assuming named template type: \
+- id = {:?}; spelling = {}",
++ id = {:?}; spelling = {}",
+ id,
+ ty.spelling()
+ );
+@@ -1652,9 +1640,9 @@
+
+ debug!(
+ "Item::type_param:\n\
+- \twith_id = {:?},\n\
+- \tty = {} {:?},\n\
+- \tlocation: {:?}",
++ \twith_id = {:?},\n\
++ \tty = {} {:?},\n\
++ \tlocation: {:?}",
+ with_id,
+ ty.spelling(),
+ ty,
+@@ -1735,50 +1723,51 @@
+ (refd_spelling.is_empty() && ANON_TYPE_PARAM_RE.is_match(spelling.as_ref()))
+ }
+
+- let definition =
+- if is_template_with_spelling(&location, &ty_spelling) {
+- // Situation (1)
+- location
+- } else if location.kind() == clang_sys::CXCursor_TypeRef {
+- // Situation (2)
+- match location.referenced() {
+- Some(refd)
+- if is_template_with_spelling(&refd, &ty_spelling) => {
+- refd
+- }
+- _ => return None,
++ let definition = if is_template_with_spelling(&location, &ty_spelling) {
++ // Situation (1)
++ location
++ } else if location.kind() == clang_sys::CXCursor_TypeRef {
++ // Situation (2)
++ match location.referenced() {
++ Some(refd)
++ if is_template_with_spelling(&refd, &ty_spelling) =>
++ {
++ refd
+ }
+- } else {
+- // Situation (3)
+- let mut definition = None;
++ _ => return None,
++ }
++ } else {
++ // Situation (3)
++ let mut definition = None;
+
+- location.visit(|child| {
+- let child_ty = child.cur_type();
+- if child_ty.kind() == clang_sys::CXCursor_TypeRef &&
+- child_ty.spelling() == ty_spelling
+- {
+- match child.referenced() {
+- Some(refd)
+- if is_template_with_spelling(
+- &refd,
+- &ty_spelling,
+- ) => {
+- definition = Some(refd);
+- return clang_sys::CXChildVisit_Break;
+- }
+- _ => {}
++ location.visit(|child| {
++ let child_ty = child.cur_type();
++ if child_ty.kind() == clang_sys::CXCursor_TypeRef &&
++ child_ty.spelling() == ty_spelling
++ {
++ match child.referenced() {
++ Some(refd)
++ if is_template_with_spelling(
++ &refd,
++ &ty_spelling,
++ ) =>
++ {
++ definition = Some(refd);
++ return clang_sys::CXChildVisit_Break;
+ }
++ _ => {}
+ }
++ }
+
+- clang_sys::CXChildVisit_Continue
+- });
++ clang_sys::CXChildVisit_Continue
++ });
+
+- if let Some(def) = definition {
+- def
+- } else {
+- return None;
+- }
+- };
++ if let Some(def) = definition {
++ def
++ } else {
++ return None;
++ }
++ };
+ assert!(is_template_with_spelling(&definition, &ty_spelling));
+
+ // Named types are always parented to the root module. They are never
+@@ -1789,9 +1778,12 @@
+
+ if let Some(id) = ctx.get_type_param(&definition) {
+ if let Some(with_id) = with_id {
+- return Some(
+- ctx.build_ty_wrapper(with_id, id, Some(parent), &ty),
+- );
++ return Some(ctx.build_ty_wrapper(
++ with_id,
++ id,
++ Some(parent),
++ &ty,
++ ));
+ } else {
+ return Some(id);
+ }
+diff --git a/third_party/rust/bindgen/src/ir/item_kind.rs b/third_party/rust/bindgen/src/ir/item_kind.rs
+--- a/third_party/rust/bindgen/src/ir/item_kind.rs
++++ b/third_party/rust/bindgen/src/ir/item_kind.rs
+@@ -135,11 +135,7 @@
+ where
+ W: io::Write,
+ {
+- writeln!(
+- out,
+- "<tr><td>kind</td><td>{}</td></tr>",
+- self.kind_name()
+- )?;
++ writeln!(out, "<tr><td>kind</td><td>{}</td></tr>", self.kind_name())?;
+
+ match *self {
+ ItemKind::Module(ref module) => module.dot_attributes(ctx, out),
+diff --git a/third_party/rust/bindgen/src/ir/layout.rs b/third_party/rust/bindgen/src/ir/layout.rs
+--- a/third_party/rust/bindgen/src/ir/layout.rs
++++ b/third_party/rust/bindgen/src/ir/layout.rs
+@@ -1,9 +1,9 @@
+ //! Intermediate representation for the physical layout of some type.
+
+ use super::derive::CanDerive;
+-use super::ty::{RUST_DERIVE_IN_ARRAY_LIMIT, Type, TypeKind};
++use super::ty::{Type, TypeKind, RUST_DERIVE_IN_ARRAY_LIMIT};
++use clang;
+ use ir::context::BindgenContext;
+-use clang;
+ use std::cmp;
+
+ /// A type that represents the struct layout of a type.
+@@ -107,7 +107,10 @@
+
+ /// Return the known rust type we should use to create a correctly-aligned
+ /// field with this layout.
+- pub fn known_rust_type_for_array(&self,ctx: &BindgenContext) -> Option<&'static str> {
++ pub fn known_rust_type_for_array(
++ &self,
++ ctx: &BindgenContext,
++ ) -> Option<&'static str> {
+ Layout::known_type_for_size(ctx, self.0.align)
+ }
+
+@@ -124,10 +127,14 @@
+ /// Return `true` if this opaque layout's array size will fit within the
+ /// maximum number of array elements that Rust allows deriving traits
+ /// with. Return `false` otherwise.
+- pub fn array_size_within_derive_limit(&self, ctx: &BindgenContext) -> CanDerive {
+- if self.array_size(ctx).map_or(false, |size| {
+- size <= RUST_DERIVE_IN_ARRAY_LIMIT
+- }) {
++ pub fn array_size_within_derive_limit(
++ &self,
++ ctx: &BindgenContext,
++ ) -> CanDerive {
++ if self
++ .array_size(ctx)
++ .map_or(false, |size| size <= RUST_DERIVE_IN_ARRAY_LIMIT)
++ {
+ CanDerive::Yes
+ } else {
+ CanDerive::Manually
+diff --git a/third_party/rust/bindgen/src/ir/mod.rs b/third_party/rust/bindgen/src/ir/mod.rs
+--- a/third_party/rust/bindgen/src/ir/mod.rs
++++ b/third_party/rust/bindgen/src/ir/mod.rs
+@@ -3,10 +3,10 @@
+ //! Parsing C/C++ generates the IR, while code generation outputs Rust code from
+ //! the IR.
+
++pub mod analysis;
+ pub mod annotations;
+-pub mod analysis;
++pub mod comment;
+ pub mod comp;
+-pub mod comment;
+ pub mod context;
+ pub mod derive;
+ pub mod dot;
+@@ -17,8 +17,8 @@
+ pub mod item_kind;
+ pub mod layout;
+ pub mod module;
++pub mod objc;
+ pub mod template;
+ pub mod traversal;
+ pub mod ty;
+ pub mod var;
+-pub mod objc;
+diff --git a/third_party/rust/bindgen/src/ir/module.rs b/third_party/rust/bindgen/src/ir/module.rs
+--- a/third_party/rust/bindgen/src/ir/module.rs
++++ b/third_party/rust/bindgen/src/ir/module.rs
+@@ -82,9 +82,9 @@
+ CXCursor_Namespace => {
+ let module_id = ctx.module(cursor);
+ ctx.with_module(module_id, |ctx| {
+- cursor.visit(
+- |cursor| parse_one(ctx, cursor, Some(module_id.into())),
+- )
++ cursor.visit(|cursor| {
++ parse_one(ctx, cursor, Some(module_id.into()))
++ })
+ });
+
+ Ok(ParseResult::AlreadyResolved(module_id.into()))
+diff --git a/third_party/rust/bindgen/src/ir/objc.rs b/third_party/rust/bindgen/src/ir/objc.rs
+--- a/third_party/rust/bindgen/src/ir/objc.rs
++++ b/third_party/rust/bindgen/src/ir/objc.rs
+@@ -12,7 +12,7 @@
+ use clang_sys::CXCursor_ObjCInstanceMethodDecl;
+ use clang_sys::CXCursor_ObjCProtocolDecl;
+ use clang_sys::CXCursor_ObjCProtocolRef;
+-use proc_macro2::{TokenStream, Ident, Span};
++use proc_macro2::{Ident, Span, TokenStream};
+
+ /// Objective C interface as used in TypeKind
+ ///
+@@ -212,7 +212,8 @@
+
+ /// Formats the method call
+ pub fn format_method_call(&self, args: &[TokenStream]) -> TokenStream {
+- let split_name: Vec<_> = self.name
++ let split_name: Vec<_> = self
++ .name
+ .split(':')
+ .filter(|p| !p.is_empty())
+ .map(|name| Ident::new(name, Span::call_site()))
+@@ -242,7 +243,7 @@
+ let name_and_sig: Vec<&str> = arg.split(' ').collect();
+ let name = name_and_sig[0];
+ args_without_types.push(Ident::new(name, Span::call_site()))
+- };
++ }
+
+ let args = split_name
+ .into_iter()
+diff --git a/third_party/rust/bindgen/src/ir/template.rs b/third_party/rust/bindgen/src/ir/template.rs
+--- a/third_party/rust/bindgen/src/ir/template.rs
++++ b/third_party/rust/bindgen/src/ir/template.rs
+@@ -99,7 +99,7 @@
+ /// ... |Wtf | ... | [T] |
+ /// ... |Qux | ... | [] |
+ /// ----+------+-----+----------------------+
+-pub trait TemplateParameters : Sized {
++pub trait TemplateParameters: Sized {
+ /// Get the set of `ItemId`s that make up this template declaration's free
+ /// template parameters.
+ ///
+@@ -135,9 +135,11 @@
+ Self: ItemAncestors,
+ {
+ let ancestors: Vec<_> = self.ancestors(ctx).collect();
+- ancestors.into_iter().rev().flat_map(|id| {
+- id.self_template_params(ctx).into_iter()
+- }).collect()
++ ancestors
++ .into_iter()
++ .rev()
++ .flat_map(|id| id.self_template_params(ctx).into_iter())
++ .collect()
+ }
+
+ /// Get only the set of template parameters that this item uses. This is a
+@@ -153,10 +155,11 @@
+ );
+
+ let id = *self.as_ref();
+- ctx.resolve_item(id).all_template_params(ctx)
+- .into_iter()
+- .filter(|p| ctx.uses_template_parameter(id, *p))
+- .collect()
++ ctx.resolve_item(id)
++ .all_template_params(ctx)
++ .into_iter()
++ .filter(|p| ctx.uses_template_parameter(id, *p))
++ .collect()
+ }
+ }
+
+@@ -221,34 +224,33 @@
+ ) -> Option<TemplateInstantiation> {
+ use clang_sys::*;
+
+- let template_args = ty.template_args()
+- .map_or(vec![], |args| {
+- match ty.canonical_type().template_args() {
+- Some(canonical_args) => {
+- let arg_count = args.len();
+- args.chain(canonical_args.skip(arg_count))
+- .filter(|t| t.kind() != CXType_Invalid)
+- .map(|t| {
+- Item::from_ty_or_ref(t, t.declaration(), None, ctx)
+- }).collect()
+- }
+- None => {
+- args.filter(|t| t.kind() != CXType_Invalid)
+- .map(|t| {
+- Item::from_ty_or_ref(t, t.declaration(), None, ctx)
+- }).collect()
+- }
+- }
+- });
++ let template_args = ty.template_args().map_or(vec![], |args| match ty
++ .canonical_type()
++ .template_args()
++ {
++ Some(canonical_args) => {
++ let arg_count = args.len();
++ args.chain(canonical_args.skip(arg_count))
++ .filter(|t| t.kind() != CXType_Invalid)
++ .map(|t| {
++ Item::from_ty_or_ref(t, t.declaration(), None, ctx)
++ })
++ .collect()
++ }
++ None => args
++ .filter(|t| t.kind() != CXType_Invalid)
++ .map(|t| Item::from_ty_or_ref(t, t.declaration(), None, ctx))
++ .collect(),
++ });
+
+ let declaration = ty.declaration();
+- let definition =
+- if declaration.kind() == CXCursor_TypeAliasTemplateDecl {
+- Some(declaration)
+- } else {
+- declaration.specialized().or_else(|| {
+- let mut template_ref = None;
+- ty.declaration().visit(|child| {
++ let definition = if declaration.kind() == CXCursor_TypeAliasTemplateDecl
++ {
++ Some(declaration)
++ } else {
++ declaration.specialized().or_else(|| {
++ let mut template_ref = None;
++ ty.declaration().visit(|child| {
+ if child.kind() == CXCursor_TemplateRef {
+ template_ref = Some(child);
+ return CXVisit_Break;
+@@ -261,9 +263,9 @@
+ CXChildVisit_Recurse
+ });
+
+- template_ref.and_then(|cur| cur.referenced())
+- })
+- };
++ template_ref.and_then(|cur| cur.referenced())
++ })
++ };
+
+ let definition = match definition {
+ Some(def) => def,
+@@ -271,7 +273,7 @@
+ if !ty.declaration().is_builtin() {
+ warn!(
+ "Could not find template definition for template \
+- instantiation"
++ instantiation"
+ );
+ }
+ return None;
+@@ -305,7 +307,8 @@
+ // arguments properly.
+
+ let mut path = item.canonical_path(ctx);
+- let args: Vec<_> = self.template_arguments()
++ let args: Vec<_> = self
++ .template_arguments()
+ .iter()
+ .map(|arg| {
+ let arg_path = arg.canonical_path(ctx);
+@@ -330,7 +333,8 @@
+ where
+ T: Tracer,
+ {
+- tracer.visit_kind(self.definition.into(), EdgeKind::TemplateDeclaration);
++ tracer
++ .visit_kind(self.definition.into(), EdgeKind::TemplateDeclaration);
+ for arg in self.template_arguments() {
+ tracer.visit_kind(arg.into(), EdgeKind::TemplateArgument);
+ }
+diff --git a/third_party/rust/bindgen/src/ir/traversal.rs b/third_party/rust/bindgen/src/ir/traversal.rs
+--- a/third_party/rust/bindgen/src/ir/traversal.rs
++++ b/third_party/rust/bindgen/src/ir/traversal.rs
+@@ -20,10 +20,7 @@
+ impl Edge {
+ /// Construct a new edge whose referent is `to` and is of the given `kind`.
+ pub fn new(to: ItemId, kind: EdgeKind) -> Edge {
+- Edge {
+- to,
+- kind,
+- }
++ Edge { to, kind }
+ }
+ }
+
+@@ -236,7 +233,7 @@
+ EdgeKind::InnerVar => cc.vars(),
+ EdgeKind::Method => cc.methods(),
+ EdgeKind::Constructor => cc.constructors(),
+- EdgeKind::Destructor => cc.destructors()
++ EdgeKind::Destructor => cc.destructors(),
+ }
+ }
+
+@@ -269,10 +266,7 @@
+ /// each item. This is useful for providing debug assertions with meaningful
+ /// diagnostic messages about dangling items.
+ #[derive(Debug)]
+-pub struct Paths<'ctx>(
+- BTreeMap<ItemId, ItemId>,
+- &'ctx BindgenContext
+-);
++pub struct Paths<'ctx>(BTreeMap<ItemId, ItemId>, &'ctx BindgenContext);
+
+ impl<'ctx> TraversalStorage<'ctx> for Paths<'ctx> {
+ fn new(ctx: &'ctx BindgenContext) -> Self {
+@@ -289,7 +283,7 @@
+ loop {
+ let predecessor = *self.0.get(&current).expect(
+ "We know we found this item id, so it must have a \
+- predecessor",
++ predecessor",
+ );
+ if predecessor == current {
+ break;
+@@ -300,8 +294,7 @@
+ path.reverse();
+ panic!(
+ "Found reference to dangling id = {:?}\nvia path = {:?}",
+- item,
+- path
++ item, path
+ );
+ }
+
+@@ -495,13 +488,12 @@
+ ///
+ /// See `BindgenContext::assert_no_dangling_item_traversal` for more
+ /// information.
+-pub type AssertNoDanglingItemsTraversal<'ctx> =
+- ItemTraversal<
+- 'ctx,
+- Paths<'ctx>,
+- VecDeque<ItemId>,
+- for<'a> fn(&'a BindgenContext, Edge) -> bool,
+- >;
++pub type AssertNoDanglingItemsTraversal<'ctx> = ItemTraversal<
++ 'ctx,
++ Paths<'ctx>,
++ VecDeque<ItemId>,
++ for<'a> fn(&'a BindgenContext, Edge) -> bool,
++>;
+
+ #[cfg(test)]
+ mod tests {
+@@ -511,6 +503,6 @@
+ #[allow(dead_code)]
+ fn traversal_predicate_is_object_safe() {
+ // This should compile only if TraversalPredicate is object safe.
+- fn takes_by_trait_object(_: &TraversalPredicate) {}
++ fn takes_by_trait_object(_: &dyn TraversalPredicate) {}
+ }
+ }
+diff --git a/third_party/rust/bindgen/src/ir/ty.rs b/third_party/rust/bindgen/src/ir/ty.rs
+--- a/third_party/rust/bindgen/src/ir/ty.rs
++++ b/third_party/rust/bindgen/src/ir/ty.rs
+@@ -9,8 +9,9 @@
+ use super::item::{IsOpaque, Item};
+ use super::layout::{Layout, Opaque};
+ use super::objc::ObjCInterface;
+-use super::template::{AsTemplateParam, TemplateInstantiation,
+- TemplateParameters};
++use super::template::{
++ AsTemplateParam, TemplateInstantiation, TemplateParameters,
++};
+ use super::traversal::{EdgeKind, Trace, Tracer};
+ use clang::{self, Cursor};
+ use parse::{ClangItemParser, ParseError, ParseResult};
+@@ -235,7 +236,11 @@
+ pub fn is_incomplete_array(&self, ctx: &BindgenContext) -> Option<ItemId> {
+ match self.kind {
+ TypeKind::Array(item, len) => {
+- if len == 0 { Some(item.into()) } else { None }
++ if len == 0 {
++ Some(item.into())
++ } else {
++ None
++ }
+ }
+ TypeKind::ResolvedTypeRef(inner) => {
+ ctx.resolve_type(inner).is_incomplete_array(ctx)
+@@ -249,20 +254,15 @@
+ self.layout.or_else(|| {
+ match self.kind {
+ TypeKind::Comp(ref ci) => ci.layout(ctx),
+- TypeKind::Array(inner, length) if length == 0 => {
+- Some(Layout::new(
+- 0,
+- ctx.resolve_type(inner).layout(ctx)?.align,
+- ))
+- }
++ TypeKind::Array(inner, length) if length == 0 => Some(
++ Layout::new(0, ctx.resolve_type(inner).layout(ctx)?.align),
++ ),
+ // FIXME(emilio): This is a hack for anonymous union templates.
+ // Use the actual pointer size!
+- TypeKind::Pointer(..) => {
+- Some(Layout::new(
+- ctx.target_pointer_size(),
+- ctx.target_pointer_size(),
+- ))
+- }
++ TypeKind::Pointer(..) => Some(Layout::new(
++ ctx.target_pointer_size(),
++ ctx.target_pointer_size(),
++ )),
+ TypeKind::ResolvedTypeRef(inner) => {
+ ctx.resolve_type(inner).layout(ctx)
+ }
+@@ -301,8 +301,12 @@
+ ctx: &BindgenContext,
+ ) -> Option<Cow<'a, str>> {
+ let name_info = match *self.kind() {
+- TypeKind::Pointer(inner) => Some((inner.into(), Cow::Borrowed("ptr"))),
+- TypeKind::Reference(inner) => Some((inner.into(), Cow::Borrowed("ref"))),
++ TypeKind::Pointer(inner) => {
++ Some((inner.into(), Cow::Borrowed("ptr")))
++ }
++ TypeKind::Reference(inner) => {
++ Some((inner.into(), Cow::Borrowed("ref")))
++ }
+ TypeKind::Array(inner, length) => {
+ Some((inner, format!("array{}", length).into()))
+ }
+@@ -323,9 +327,8 @@
+ &'tr self,
+ ctx: &'tr BindgenContext,
+ ) -> &'tr Type {
+- self.safe_canonical_type(ctx).expect(
+- "Should have been resolved after parsing!",
+- )
++ self.safe_canonical_type(ctx)
++ .expect("Should have been resolved after parsing!")
+ }
+
+ /// Returns the canonical type of this type, that is, the "inner type".
+@@ -362,10 +365,9 @@
+ TypeKind::TemplateAlias(inner, _) => {
+ ctx.resolve_type(inner).safe_canonical_type(ctx)
+ }
+- TypeKind::TemplateInstantiation(ref inst) => {
+- ctx.resolve_type(inst.template_definition())
+- .safe_canonical_type(ctx)
+- }
++ TypeKind::TemplateInstantiation(ref inst) => ctx
++ .resolve_type(inst.template_definition())
++ .safe_canonical_type(ctx),
+
+ TypeKind::UnresolvedTypeRef(..) => None,
+ }
+@@ -445,8 +447,7 @@
+ out,
+ "<tr><td>size</td><td>{}</td></tr>
+ <tr><td>align</td><td>{}</td></tr>",
+- layout.size,
+- layout.align
++ layout.size, layout.align
+ )?;
+ if layout.packed {
+ writeln!(out, "<tr><td>packed</td><td>true</td></tr>")?;
+@@ -470,7 +471,11 @@
+ where
+ W: io::Write,
+ {
+- writeln!(out, "<tr><td>type kind</td><td>{}</td></tr>", self.kind_name())?;
++ writeln!(
++ out,
++ "<tr><td>type kind</td><td>{}</td></tr>",
++ self.kind_name()
++ )?;
+
+ if let TypeKind::Comp(ref comp) = *self {
+ comp.dot_attributes(ctx, out)?;
+@@ -559,19 +564,13 @@
+ }
+
+ impl TemplateParameters for Type {
+- fn self_template_params(
+- &self,
+- ctx: &BindgenContext,
+- ) -> Vec<TypeId> {
++ fn self_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId> {
+ self.kind.self_template_params(ctx)
+ }
+ }
+
+ impl TemplateParameters for TypeKind {
+- fn self_template_params(
+- &self,
+- ctx: &BindgenContext,
+- ) -> Vec<TypeId> {
++ fn self_template_params(&self, ctx: &BindgenContext) -> Vec<TypeId> {
+ match *self {
+ TypeKind::ResolvedTypeRef(id) => {
+ ctx.resolve_type(id).self_template_params(ctx)
+@@ -687,7 +686,7 @@
+ clang::Type,
+ clang::Cursor,
+ /* parent_id */
+- Option<ItemId>
++ Option<ItemId>,
+ ),
+
+ /// An indirection to another type.
+@@ -742,9 +741,7 @@
+
+ debug!(
+ "from_clang_ty: {:?}, ty: {:?}, loc: {:?}",
+- potential_id,
+- ty,
+- location
++ potential_id, ty, location
+ );
+ debug!("currently_parsed_types: {:?}", ctx.currently_parsed_types());
+
+@@ -753,8 +750,9 @@
+ // Parse objc protocols as if they were interfaces
+ let mut ty_kind = ty.kind();
+ match location.kind() {
+- CXCursor_ObjCProtocolDecl |
+- CXCursor_ObjCCategoryDecl => ty_kind = CXType_ObjCInterface,
++ CXCursor_ObjCProtocolDecl | CXCursor_ObjCCategoryDecl => {
++ ty_kind = CXType_ObjCInterface
++ }
+ _ => {}
+ }
+
+@@ -764,10 +762,10 @@
+ // We are rewriting them as id to suppress multiple conflicting
+ // typedefs at root level
+ if ty_kind == CXType_Typedef {
+- let is_template_type_param = ty.declaration().kind() ==
+- CXCursor_TemplateTypeParameter;
+- let is_canonical_objcpointer = canonical_ty.kind() ==
+- CXType_ObjCObjectPointer;
++ let is_template_type_param =
++ ty.declaration().kind() == CXCursor_TemplateTypeParameter;
++ let is_canonical_objcpointer =
++ canonical_ty.kind() == CXType_ObjCObjectPointer;
+
+ // We have found a template type for objc interface
+ if is_canonical_objcpointer && is_template_type_param {
+@@ -775,19 +773,19 @@
+ // To keep it simple, just name them ids
+ name = "id".to_owned();
+ }
+-
+ }
+
+ if location.kind() == CXCursor_ClassTemplatePartialSpecialization {
+ // Sorry! (Not sorry)
+ warn!(
+ "Found a partial template specialization; bindgen does not \
+- support partial template specialization! Constructing \
+- opaque type instead."
++ support partial template specialization! Constructing \
++ opaque type instead."
+ );
+- return Ok(
+- ParseResult::New(Opaque::from_clang_ty(&canonical_ty, ctx), None),
+- );
++ return Ok(ParseResult::New(
++ Opaque::from_clang_ty(&canonical_ty, ctx),
++ None,
++ ));
+ }
+
+ let kind = if location.kind() == CXCursor_TemplateRef ||
+@@ -800,7 +798,8 @@
+ }
+ } else {
+ match ty_kind {
+- CXType_Unexposed if *ty != canonical_ty &&
++ CXType_Unexposed
++ if *ty != canonical_ty &&
+ canonical_ty.kind() != CXType_Invalid &&
+ ty.ret_type().is_none() &&
+ // Sometime clang desugars some types more than
+@@ -815,13 +814,16 @@
+ // => { ... }
+ //
+ // etc.
+- !canonical_ty.spelling().contains("type-parameter") => {
++ !canonical_ty.spelling().contains("type-parameter") =>
++ {
+ debug!("Looking for canonical type: {:?}", canonical_ty);
+- return Self::from_clang_ty(potential_id,
+- &canonical_ty,
+- location,
+- parent_id,
+- ctx);
++ return Self::from_clang_ty(
++ potential_id,
++ &canonical_ty,
++ location,
++ parent_id,
++ ctx,
++ );
+ }
+ CXType_Unexposed | CXType_Invalid => {
+ // For some reason Clang doesn't give us any hint in some
+@@ -837,23 +839,21 @@
+ } else if ty.is_fully_instantiated_template() {
+ debug!(
+ "Template specialization: {:?}, {:?} {:?}",
+- ty,
+- location,
+- canonical_ty
++ ty, location, canonical_ty
+ );
+ let complex = CompInfo::from_ty(
+ potential_id,
+ ty,
+ Some(location),
+ ctx,
+- ).expect("C'mon");
++ )
++ .expect("C'mon");
+ TypeKind::Comp(complex)
+ } else {
+ match location.kind() {
+ CXCursor_CXXBaseSpecifier |
+ CXCursor_ClassTemplate => {
+- if location.kind() ==
+- CXCursor_CXXBaseSpecifier
++ if location.kind() == CXCursor_CXXBaseSpecifier
+ {
+ // In the case we're parsing a base specifier
+ // inside an unexposed or invalid type, it means
+@@ -896,8 +896,7 @@
+ // [4]: inherit-namespaced.hpp
+ if location.spelling().chars().all(|c| {
+ c.is_alphanumeric() || c == '_'
+- })
+- {
++ }) {
+ return Err(ParseError::Recurse);
+ }
+ } else {
+@@ -915,13 +914,14 @@
+ Err(_) => {
+ warn!(
+ "Could not create complex type \
+- from class template or base \
+- specifier, using opaque blob"
++ from class template or base \
++ specifier, using opaque blob"
+ );
+- let opaque = Opaque::from_clang_ty(ty, ctx);
+- return Ok(
+- ParseResult::New(opaque, None),
+- );
++ let opaque =
++ Opaque::from_clang_ty(ty, ctx);
++ return Ok(ParseResult::New(
++ opaque, None,
++ ));
+ }
+ }
+ }
+@@ -944,7 +944,8 @@
+
+ name = current.spelling();
+
+- let inner_ty = cur.typedef_type()
++ let inner_ty = cur
++ .typedef_type()
+ .expect("Not valid Type?");
+ inner = Ok(Item::from_ty_or_ref(
+ inner_ty,
+@@ -954,13 +955,14 @@
+ ));
+ }
+ CXCursor_TemplateTypeParameter => {
+- let param =
+- Item::type_param(None,
+- cur,
+- ctx)
+- .expect("Item::type_param shouldn't \
+- ever fail if we are looking \
+- at a TemplateTypeParameter");
++ let param = Item::type_param(
++ None, cur, ctx,
++ )
++ .expect(
++ "Item::type_param shouldn't \
++ ever fail if we are looking \
++ at a TemplateTypeParameter",
++ );
+ args.push(param);
+ }
+ _ => {}
+@@ -973,7 +975,7 @@
+ Err(..) => {
+ error!(
+ "Failed to parse template alias \
+- {:?}",
++ {:?}",
+ location
+ );
+ return Err(ParseError::Continue);
+@@ -1009,10 +1011,8 @@
+
+ debug!(
+ "TypeRef: location = {:?}; referenced = \
+- {:?}; referenced_ty = {:?}",
+- location,
+- referenced,
+- referenced_ty
++ {:?}; referenced_ty = {:?}",
++ location, referenced, referenced_ty
+ );
+
+ let id = Item::from_ty_or_ref_with_id(
+@@ -1022,7 +1022,9 @@
+ parent_id,
+ ctx,
+ );
+- return Ok(ParseResult::AlreadyResolved(id.into()));
++ return Ok(ParseResult::AlreadyResolved(
++ id.into(),
++ ));
+ }
+ CXCursor_NamespaceRef => {
+ return Err(ParseError::Continue);
+@@ -1078,11 +1080,10 @@
+ let inner =
+ Item::from_ty_or_ref(pointee, location, None, ctx);
+ TypeKind::BlockPointer(inner)
+- },
++ }
+ // XXX: RValueReference is most likely wrong, but I don't think we
+ // can even add bindings for that, so huh.
+- CXType_RValueReference |
+- CXType_LValueReference => {
++ CXType_RValueReference | CXType_LValueReference => {
+ let inner = Item::from_ty_or_ref(
+ ty.pointee_type().unwrap(),
+ location,
+@@ -1092,14 +1093,14 @@
+ TypeKind::Reference(inner)
+ }
+ // XXX DependentSizedArray is wrong
+- CXType_VariableArray |
+- CXType_DependentSizedArray => {
++ CXType_VariableArray | CXType_DependentSizedArray => {
+ let inner = Item::from_ty(
+ ty.elem_type().as_ref().unwrap(),
+ location,
+ None,
+ ctx,
+- ).expect("Not able to resolve array element?");
++ )
++ .expect("Not able to resolve array element?");
+ TypeKind::Pointer(inner)
+ }
+ CXType_IncompleteArray => {
+@@ -1108,13 +1109,12 @@
+ location,
+ None,
+ ctx,
+- ).expect("Not able to resolve array element?");
++ )
++ .expect("Not able to resolve array element?");
+ TypeKind::Array(inner, 0)
+ }
+- CXType_FunctionNoProto |
+- CXType_FunctionProto => {
+- let signature =
+- FunctionSig::from_ty(ty, &location, ctx)?;
++ CXType_FunctionNoProto | CXType_FunctionProto => {
++ let signature = FunctionSig::from_ty(ty, &location, ctx)?;
+ TypeKind::Function(signature)
+ }
+ CXType_Typedef => {
+@@ -1141,7 +1141,8 @@
+ ty,
+ Some(location),
+ ctx,
+- ).expect("Not a complex type?");
++ )
++ .expect("Not a complex type?");
+
+ if name.is_empty() {
+ // The pretty-printed name may contain typedefed name,
+@@ -1160,7 +1161,8 @@
+ location,
+ None,
+ ctx,
+- ).expect("Not able to resolve vector element?");
++ )
++ .expect("Not able to resolve vector element?");
+ TypeKind::Vector(inner, ty.num_elements().unwrap())
+ }
+ CXType_ConstantArray => {
+@@ -1169,7 +1171,8 @@
+ location,
+ None,
+ ctx,
+- ).expect("Not able to resolve array element?");
++ )
++ .expect("Not able to resolve array element?");
+ TypeKind::Array(inner, ty.num_elements().unwrap())
+ }
+ CXType_Elaborated => {
+@@ -1183,8 +1186,7 @@
+ }
+ CXType_ObjCId => TypeKind::ObjCId,
+ CXType_ObjCSel => TypeKind::ObjCSel,
+- CXType_ObjCClass |
+- CXType_ObjCInterface => {
++ CXType_ObjCClass | CXType_ObjCInterface => {
+ let interface = ObjCInterface::from_ty(&location, ctx)
+ .expect("Not a valid objc interface?");
+ name = interface.rust_name();
+diff --git a/third_party/rust/bindgen/src/ir/var.rs b/third_party/rust/bindgen/src/ir/var.rs
+--- a/third_party/rust/bindgen/src/ir/var.rs
++++ b/third_party/rust/bindgen/src/ir/var.rs
+@@ -1,12 +1,12 @@
+ //! Intermediate representation of variables.
+
+-use callbacks::MacroParsingBehavior;
+ use super::context::{BindgenContext, TypeId};
+ use super::dot::DotAttributes;
+ use super::function::cursor_mangling;
+ use super::int::IntKind;
+ use super::item::Item;
+ use super::ty::{FloatKind, TypeKind};
++use callbacks::MacroParsingBehavior;
+ use cexpr;
+ use clang;
+ use parse::{ClangItemParser, ClangSubItemParser, ParseError, ParseResult};
+@@ -133,9 +133,9 @@
+ cursor: clang::Cursor,
+ ctx: &mut BindgenContext,
+ ) -> Result<ParseResult<Self>, ParseError> {
+- use clang_sys::*;
+ use cexpr::expr::EvalResult;
+ use cexpr::literal::CChar;
++ use clang_sys::*;
+ match cursor.kind() {
+ CXCursor_MacroDefinition => {
+ if let Some(callbacks) = ctx.parse_callbacks() {
+@@ -205,9 +205,12 @@
+ (TypeKind::Pointer(char_ty), VarType::String(val))
+ }
+ EvalResult::Int(Wrapping(value)) => {
+- let kind = ctx.parse_callbacks()
++ let kind = ctx
++ .parse_callbacks()
+ .and_then(|c| c.int_macro(&name, value))
+- .unwrap_or_else(|| default_macro_constant_type(value));
++ .unwrap_or_else(|| {
++ default_macro_constant_type(value)
++ });
+
+ (TypeKind::Int(kind), VarType::Int(value))
+ }
+@@ -239,7 +242,7 @@
+ ty.kind(),
+ CXType_Auto,
+ "Couldn't resolve constant type, and it \
+- wasn't an nondeductible auto type!"
++ wasn't an nondeductible auto type!"
+ );
+ return Err(e);
+ }
+@@ -249,9 +252,9 @@
+ // tests/headers/inner_const.hpp
+ //
+ // That's fine because in that case we know it's not a literal.
+- let canonical_ty = ctx.safe_resolve_type(ty).and_then(|t| {
+- t.safe_canonical_type(ctx)
+- });
++ let canonical_ty = ctx
++ .safe_resolve_type(ty)
++ .and_then(|t| t.safe_canonical_type(ctx));
+
+ let is_integer = canonical_ty.map_or(false, |t| t.is_integer());
+ let is_float = canonical_ty.map_or(false, |t| t.is_float());
+@@ -266,27 +269,29 @@
+ _ => unreachable!(),
+ };
+
+- let mut val = cursor
+- .evaluate()
+- .and_then(|v| v.as_int());
++ let mut val = cursor.evaluate().and_then(|v| v.as_int());
+ if val.is_none() || !kind.signedness_matches(val.unwrap()) {
+ let tu = ctx.translation_unit();
+ val = get_integer_literal_from_cursor(&cursor, tu);
+ }
+
+- val.map(|val| if kind == IntKind::Bool {
+- VarType::Bool(val != 0)
+- } else {
+- VarType::Int(val)
++ val.map(|val| {
++ if kind == IntKind::Bool {
++ VarType::Bool(val != 0)
++ } else {
++ VarType::Int(val)
++ }
+ })
+ } else if is_float {
+- cursor.evaluate().and_then(|v| v.as_double()).map(
+- VarType::Float,
+- )
++ cursor
++ .evaluate()
++ .and_then(|v| v.as_double())
++ .map(VarType::Float)
+ } else {
+- cursor.evaluate().and_then(|v| v.as_literal_string()).map(
+- VarType::String,
+- )
++ cursor
++ .evaluate()
++ .and_then(|v| v.as_literal_string())
++ .map(VarType::String)
+ };
+
+ let mangling = cursor_mangling(ctx, &cursor);
+@@ -355,8 +360,7 @@
+ let mut value = None;
+ cursor.visit(|c| {
+ match c.kind() {
+- CXCursor_IntegerLiteral |
+- CXCursor_UnaryOperator => {
++ CXCursor_IntegerLiteral | CXCursor_UnaryOperator => {
+ value = parse_int_literal_tokens(&c);
+ }
+ CXCursor_UnexposedExpr => {
+diff --git a/third_party/rust/bindgen/src/lib.rs b/third_party/rust/bindgen/src/lib.rs
+--- a/third_party/rust/bindgen/src/lib.rs
++++ b/third_party/rust/bindgen/src/lib.rs
+@@ -14,7 +14,7 @@
+ // constant.
+ #![allow(non_upper_case_globals)]
+ // `quote!` nests quite deeply.
+-#![recursion_limit="128"]
++#![recursion_limit = "128"]
+
+ #[macro_use]
+ extern crate bitflags;
+@@ -23,7 +23,7 @@
+ #[allow(unused_extern_crates)]
+ extern crate cfg_if;
+ extern crate clang_sys;
+-extern crate fxhash;
++extern crate rustc_hash;
+ #[macro_use]
+ extern crate lazy_static;
+ extern crate peeking_take_while;
+@@ -32,6 +32,7 @@
+ extern crate proc_macro2;
+ extern crate regex;
+ extern crate shlex;
++#[cfg(feature = "which-rustfmt")]
+ extern crate which;
+
+ #[cfg(feature = "logging")]
+@@ -81,26 +82,26 @@
+ doc_mod!(parse, parse_docs);
+ doc_mod!(regex_set, regex_set_docs);
+
+-pub use features::{LATEST_STABLE_RUST, RUST_TARGET_STRINGS, RustTarget};
++pub use codegen::EnumVariation;
+ use features::RustFeatures;
++pub use features::{RustTarget, LATEST_STABLE_RUST, RUST_TARGET_STRINGS};
+ use ir::context::{BindgenContext, ItemId};
+ use ir::item::Item;
+ use parse::{ClangItemParser, ParseError};
+ use regex_set::RegexSet;
+-pub use codegen::EnumVariation;
+
+ use std::borrow::Cow;
+ use std::fs::{File, OpenOptions};
+ use std::io::{self, Write};
+-use std::{env, iter};
+ use std::path::{Path, PathBuf};
+ use std::process::{Command, Stdio};
+ use std::sync::Arc;
++use std::{env, iter};
+
+ // Some convenient typedefs for a fast hash map and hash set.
+-type HashMap<K, V> = ::fxhash::FxHashMap<K, V>;
+-type HashSet<K> = ::fxhash::FxHashSet<K>;
+-pub(crate) use ::std::collections::hash_map::Entry;
++type HashMap<K, V> = ::rustc_hash::FxHashMap<K, V>;
++type HashSet<K> = ::rustc_hash::FxHashSet<K>;
++pub(crate) use std::collections::hash_map::Entry;
+
+ fn args_are_cpp(clang_args: &[String]) -> bool {
+ return clang_args
+@@ -225,13 +226,20 @@
+
+ if self.options.default_enum_style != Default::default() {
+ output_vector.push("--default-enum-style=".into());
+- output_vector.push(match self.options.default_enum_style {
+- codegen::EnumVariation::Rust { non_exhaustive: false } => "rust",
+- codegen::EnumVariation::Rust { non_exhaustive: true } => "rust_non_exhaustive",
+- codegen::EnumVariation::Bitfield => "bitfield",
+- codegen::EnumVariation::Consts => "consts",
+- codegen::EnumVariation::ModuleConsts => "moduleconsts",
+- }.into())
++ output_vector.push(
++ match self.options.default_enum_style {
++ codegen::EnumVariation::Rust {
++ non_exhaustive: false,
++ } => "rust",
++ codegen::EnumVariation::Rust {
++ non_exhaustive: true,
++ } => "rust_non_exhaustive",
++ codegen::EnumVariation::Bitfield => "bitfield",
++ codegen::EnumVariation::Consts => "consts",
++ codegen::EnumVariation::ModuleConsts => "moduleconsts",
++ }
++ .into(),
++ )
+ }
+
+ self.options
+@@ -538,7 +546,8 @@
+ output_vector.push("--no-rustfmt-bindings".into());
+ }
+
+- if let Some(path) = self.options
++ if let Some(path) = self
++ .options
+ .rustfmt_configuration_file
+ .as_ref()
+ .and_then(|f| f.to_str())
+@@ -611,9 +620,8 @@
+ ///
+ /// The file `name` will be added to the clang arguments.
+ pub fn header_contents(mut self, name: &str, contents: &str) -> Builder {
+- self.input_header_contents.push(
+- (name.into(), contents.into()),
+- );
++ self.input_header_contents
++ .push((name.into(), contents.into()));
+ self
+ }
+
+@@ -800,7 +808,10 @@
+ }
+
+ /// Set the default style of code to generate for enums
+- pub fn default_enum_style(mut self, arg: codegen::EnumVariation) -> Builder {
++ pub fn default_enum_style(
++ mut self,
++ arg: codegen::EnumVariation,
++ ) -> Builder {
+ self.options.default_enum_style = arg;
+ self
+ }
+@@ -834,7 +845,10 @@
+ ///
+ /// This makes bindgen generate enums instead of constants. Regular
+ /// expressions are supported.
+- pub fn rustified_non_exhaustive_enum<T: AsRef<str>>(mut self, arg: T) -> Builder {
++ pub fn rustified_non_exhaustive_enum<T: AsRef<str>>(
++ mut self,
++ arg: T,
++ ) -> Builder {
+ self.options.rustified_non_exhaustive_enums.insert(arg);
+ self
+ }
+@@ -1146,7 +1160,7 @@
+ /// [`ParseCallbacks`](./callbacks/trait.ParseCallbacks.html) documentation.
+ pub fn parse_callbacks(
+ mut self,
+- cb: Box<callbacks::ParseCallbacks>,
++ cb: Box<dyn callbacks::ParseCallbacks>,
+ ) -> Self {
+ self.options.parse_callbacks = Some(cb);
+ self
+@@ -1200,7 +1214,9 @@
+ /// Generate the Rust bindings using the options built up thus far.
+ pub fn generate(mut self) -> Result<Bindings, ()> {
+ // Add any extra arguments from the environment to the clang command line.
+- if let Some(extra_clang_args) = env::var("BINDGEN_EXTRA_CLANG_ARGS").ok() {
++ if let Some(extra_clang_args) =
++ env::var("BINDGEN_EXTRA_CLANG_ARGS").ok()
++ {
+ // Try to parse it with shell quoting. If we fail, make it one single big argument.
+ if let Some(strings) = shlex::split(&extra_clang_args) {
+ self.options.clang_args.extend(strings);
+@@ -1211,18 +1227,18 @@
+
+ // Transform input headers to arguments on the clang command line.
+ self.options.input_header = self.input_headers.pop();
+- self.options.clang_args.extend(
+- self.input_headers
+- .drain(..)
+- .flat_map(|header| {
+- iter::once("-include".into()).chain(iter::once(header))
+- }),
+- );
++ self.options
++ .clang_args
++ .extend(self.input_headers.drain(..).flat_map(|header| {
++ iter::once("-include".into()).chain(iter::once(header))
++ }));
+
+ self.options.input_unsaved_files.extend(
+- self.input_header_contents.drain(..).map(|(name, contents)| {
+- clang::UnsavedFile::new(&name, &contents)
+- }),
++ self.input_header_contents
++ .drain(..)
++ .map(|(name, contents)| {
++ clang::UnsavedFile::new(&name, &contents)
++ }),
+ );
+
+ Bindings::generate(self.options)
+@@ -1235,14 +1251,19 @@
+ /// `__bindgen.ii`
+ pub fn dump_preprocessed_input(&self) -> io::Result<()> {
+ fn check_is_cpp(name_file: &str) -> bool {
+- name_file.ends_with(".hpp") || name_file.ends_with(".hxx")
+- || name_file.ends_with(".hh")
+- || name_file.ends_with(".h++")
++ name_file.ends_with(".hpp") ||
++ name_file.ends_with(".hxx") ||
++ name_file.ends_with(".hh") ||
++ name_file.ends_with(".h++")
+ }
+
+- let clang = clang_sys::support::Clang::find(None, &[]).ok_or_else(|| {
+- io::Error::new(io::ErrorKind::Other, "Cannot find clang executable")
+- })?;
++ let clang =
++ clang_sys::support::Clang::find(None, &[]).ok_or_else(|| {
++ io::Error::new(
++ io::ErrorKind::Other,
++ "Cannot find clang executable",
++ )
++ })?;
+
+ // The contents of a wrapper file that includes all the input header
+ // files.
+@@ -1500,7 +1521,7 @@
+
+ /// A user-provided visitor to allow customizing different kinds of
+ /// situations.
+- parse_callbacks: Option<Box<callbacks::ParseCallbacks>>,
++ parse_callbacks: Option<Box<dyn callbacks::ParseCallbacks>>,
+
+ /// Which kind of items should we generate? By default, we'll generate all
+ /// of them.
+@@ -1565,7 +1586,6 @@
+
+ /// The absolute path to the rustfmt configuration file, if None, the standard rustfmt
+ /// options are used.
+-
+ rustfmt_configuration_file: Option<PathBuf>,
+
+ /// The set of types that we should not derive `PartialEq` for.
+@@ -1709,9 +1729,10 @@
+ lazy_static! {
+ static ref LIBCLANG: Arc<clang_sys::SharedLibrary> = {
+ clang_sys::load().expect("Unable to find libclang");
+- clang_sys::get_library()
+- .expect("We just loaded libclang and it had better still be \
+- here!")
++ clang_sys::get_library().expect(
++ "We just loaded libclang and it had better still be \
++ here!",
++ )
+ };
+ }
+
+@@ -1732,7 +1753,10 @@
+ ) -> Result<Bindings, ()> {
+ ensure_libclang_is_loaded();
+
+- debug!("Generating bindings, libclang at {}", clang_sys::get_library().unwrap().path().display());
++ debug!(
++ "Generating bindings, libclang at {}",
++ clang_sys::get_library().unwrap().path().display()
++ );
+
+ options.build();
+
+@@ -1745,32 +1769,45 @@
+ // promote them to `-isystem`.
+ let clang_args_for_clang_sys = {
+ let mut last_was_include_prefix = false;
+- options.clang_args.iter().filter(|arg| {
+- if last_was_include_prefix {
+- last_was_include_prefix = false;
+- return false;
+- }
++ options
++ .clang_args
++ .iter()
++ .filter(|arg| {
++ if last_was_include_prefix {
++ last_was_include_prefix = false;
++ return false;
++ }
+
+- let arg = &**arg;
++ let arg = &**arg;
+
+- // https://clang.llvm.org/docs/ClangCommandLineReference.html
+- // -isystem and -isystem-after are harmless.
+- if arg == "-I" || arg == "--include-directory" {
+- last_was_include_prefix = true;
+- return false;
+- }
++ // https://clang.llvm.org/docs/ClangCommandLineReference.html
++ // -isystem and -isystem-after are harmless.
++ if arg == "-I" || arg == "--include-directory" {
++ last_was_include_prefix = true;
++ return false;
++ }
+
+- if arg.starts_with("-I") || arg.starts_with("--include-directory=") {
+- return false;
+- }
++ if arg.starts_with("-I") ||
++ arg.starts_with("--include-directory=")
++ {
++ return false;
++ }
+
+- true
+- }).cloned().collect::<Vec<_>>()
++ true
++ })
++ .cloned()
++ .collect::<Vec<_>>()
+ };
+
+- debug!("Trying to find clang with flags: {:?}", clang_args_for_clang_sys);
++ debug!(
++ "Trying to find clang with flags: {:?}",
++ clang_args_for_clang_sys
++ );
+
+- let clang = match clang_sys::support::Clang::find(None, &clang_args_for_clang_sys) {
++ let clang = match clang_sys::support::Clang::find(
++ None,
++ &clang_args_for_clang_sys,
++ ) {
+ None => return,
+ Some(clang) => clang,
+ };
+@@ -1780,9 +1817,9 @@
+ // Whether we are working with C or C++ inputs.
+ let is_cpp = args_are_cpp(&options.clang_args);
+ let search_paths = if is_cpp {
+- clang.cpp_search_paths
++ clang.cpp_search_paths
+ } else {
+- clang.c_search_paths
++ clang.c_search_paths
+ };
+
+ if let Some(search_paths) = search_paths {
+@@ -1815,7 +1852,10 @@
+ return Err(());
+ }
+ if !can_read(&md.permissions()) {
+- eprintln!("error: insufficient permissions to read '{}'", h);
++ eprintln!(
++ "error: insufficient permissions to read '{}'",
++ h
++ );
+ return Err(());
+ }
+ options.clang_args.push(h.clone())
+@@ -1835,8 +1875,7 @@
+ let mut context = BindgenContext::new(options);
+
+ {
+- let _t = time::Timer::new("parse")
+- .with_output(time_phases);
++ let _t = time::Timer::new("parse").with_output(time_phases);
+ parse(&mut context)?;
+ }
+
+@@ -1846,14 +1885,14 @@
+ options: options,
+ module: quote! {
+ #( #items )*
+- }
++ },
+ })
+ }
+
+ /// Convert these bindings into source text (with raw lines prepended).
+ pub fn to_string(&self) -> String {
+ let mut bytes = vec![];
+- self.write(Box::new(&mut bytes) as Box<Write>)
++ self.write(Box::new(&mut bytes) as Box<dyn Write>)
+ .expect("writing to a vec cannot fail");
+ String::from_utf8(bytes)
+ .expect("we should only write bindings that are valid utf-8")
+@@ -1871,7 +1910,7 @@
+ }
+
+ /// Write these bindings as source text to the given `Write`able.
+- pub fn write<'a>(&self, mut writer: Box<Write + 'a>) -> io::Result<()> {
++ pub fn write<'a>(&self, mut writer: Box<dyn Write + 'a>) -> io::Result<()> {
+ writer.write(
+ "/* automatically generated by rust-bindgen */\n\n".as_bytes(),
+ )?;
+@@ -1890,11 +1929,14 @@
+ match self.rustfmt_generated_string(&bindings) {
+ Ok(rustfmt_bindings) => {
+ writer.write(rustfmt_bindings.as_bytes())?;
+- },
++ }
+ Err(err) => {
+- eprintln!("Failed to run rustfmt: {} (non-fatal, continuing)", err);
++ eprintln!(
++ "Failed to run rustfmt: {} (non-fatal, continuing)",
++ err
++ );
+ writer.write(bindings.as_bytes())?;
+- },
++ }
+ }
+ Ok(())
+ }
+@@ -1908,10 +1950,18 @@
+ if let Ok(rustfmt) = env::var("RUSTFMT") {
+ return Ok(Cow::Owned(rustfmt.into()));
+ }
++ #[cfg(feature = "which-rustfmt")]
+ match which::which("rustfmt") {
+ Ok(p) => Ok(Cow::Owned(p)),
+- Err(e) => Err(io::Error::new(io::ErrorKind::Other, format!("{}", e))),
++ Err(e) => {
++ Err(io::Error::new(io::ErrorKind::Other, format!("{}", e)))
++ }
+ }
++ #[cfg(not(feature = "which-rustfmt"))]
++ Err(io::Error::new(
++ io::ErrorKind::Other,
++ "which wasn't enabled, and no rustfmt binary specified",
++ ))
+ }
+
+ /// Checks if rustfmt_bindings is set and runs rustfmt on the string
+@@ -1929,11 +1979,10 @@
+ let rustfmt = self.rustfmt_path()?;
+ let mut cmd = Command::new(&*rustfmt);
+
+- cmd
+- .stdin(Stdio::piped())
+- .stdout(Stdio::piped());
++ cmd.stdin(Stdio::piped()).stdout(Stdio::piped());
+
+- if let Some(path) = self.options
++ if let Some(path) = self
++ .options
+ .rustfmt_configuration_file
+ .as_ref()
+ .and_then(|f| f.to_str())
+@@ -1959,29 +2008,28 @@
+ io::copy(&mut child_stdout, &mut output)?;
+
+ let status = child.wait()?;
+- let source = stdin_handle.join()
+- .expect("The thread writing to rustfmt's stdin doesn't do \
+- anything that could panic");
++ let source = stdin_handle.join().expect(
++ "The thread writing to rustfmt's stdin doesn't do \
++ anything that could panic",
++ );
+
+ match String::from_utf8(output) {
+- Ok(bindings) => {
+- match status.code() {
+- Some(0) => Ok(Cow::Owned(bindings)),
+- Some(2) => Err(io::Error::new(
+- io::ErrorKind::Other,
+- "Rustfmt parsing errors.".to_string(),
+- )),
+- Some(3) => {
+- warn!("Rustfmt could not format some lines.");
+- Ok(Cow::Owned(bindings))
+- }
+- _ => Err(io::Error::new(
+- io::ErrorKind::Other,
+- "Internal rustfmt error".to_string(),
+- )),
++ Ok(bindings) => match status.code() {
++ Some(0) => Ok(Cow::Owned(bindings)),
++ Some(2) => Err(io::Error::new(
++ io::ErrorKind::Other,
++ "Rustfmt parsing errors.".to_string(),
++ )),
++ Some(3) => {
++ warn!("Rustfmt could not format some lines.");
++ Ok(Cow::Owned(bindings))
+ }
++ _ => Err(io::Error::new(
++ io::ErrorKind::Other,
++ "Internal rustfmt error".to_string(),
++ )),
+ },
+- _ => Ok(Cow::Owned(source))
++ _ => Ok(Cow::Owned(source)),
+ }
+ }
+ }
+@@ -2032,7 +2080,6 @@
+ let cursor = context.translation_unit().cursor();
+
+ if context.options().emit_ast {
+-
+ fn dump_if_not_builtin(cur: &clang::Cursor) -> CXChildVisitResult {
+ if !cur.is_builtin() {
+ clang::ast_dump(&cur, 0)
+@@ -2072,9 +2119,10 @@
+ }
+
+ let raw_v: String = clang::extract_clang_version();
+- let split_v: Option<Vec<&str>> = raw_v.split_whitespace().nth(2).map(|v| {
+- v.split('.').collect()
+- });
++ let split_v: Option<Vec<&str>> = raw_v
++ .split_whitespace()
++ .nth(2)
++ .map(|v| v.split('.').collect());
+ match split_v {
+ Some(v) => {
+ if v.len() >= 2 {
+@@ -2111,13 +2159,14 @@
+ "--no-derive-default",
+ "--generate",
+ "functions,types,vars,methods,constructors,destructors",
+- ].iter()
+- .map(|&x| x.into())
+- .collect::<Vec<String>>();
++ ]
++ .iter()
++ .map(|&x| x.into())
++ .collect::<Vec<String>>();
+
+- assert!(test_cases.iter().all(
+- |ref x| command_line_flags.contains(x),
+- ));
++ assert!(test_cases
++ .iter()
++ .all(|ref x| command_line_flags.contains(x),));
+
+ //Test 2
+ let bindings = ::builder()
+@@ -2136,13 +2185,13 @@
+ "Distinct_Type",
+ "--whitelist-function",
+ "safe_function",
+- ].iter()
+- .map(|&x| x.into())
+- .collect::<Vec<String>>();
++ ]
++ .iter()
++ .map(|&x| x.into())
++ .collect::<Vec<String>>();
+ println!("{:?}", command_line_flags);
+
+- assert!(test_cases.iter().all(
+- |ref x| command_line_flags.contains(x),
+- ));
+-
++ assert!(test_cases
++ .iter()
++ .all(|ref x| command_line_flags.contains(x),));
+ }
+diff --git a/third_party/rust/bindgen/src/main.rs b/third_party/rust/bindgen/src/main.rs
+--- a/third_party/rust/bindgen/src/main.rs
++++ b/third_party/rust/bindgen/src/main.rs
+@@ -45,7 +45,6 @@
+
+ match builder_from_flags(bind_args.into_iter()) {
+ Ok((builder, output, verbose)) => {
+-
+ let builder_result = panic::catch_unwind(|| {
+ builder.generate().expect("Unable to generate bindings")
+ });
+@@ -71,12 +70,12 @@
+ println!("Bindgen unexpectedly panicked");
+ println!(
+ "This may be caused by one of the known-unsupported \
+- things (https://rust-lang.github.io/rust-bindgen/cpp.html), \
+- please modify the bindgen flags to work around it as \
+- described in https://rust-lang.github.io/rust-bindgen/cpp.html"
++ things (https://rust-lang.github.io/rust-bindgen/cpp.html), \
++ please modify the bindgen flags to work around it as \
++ described in https://rust-lang.github.io/rust-bindgen/cpp.html"
+ );
+ println!(
+ "Otherwise, please file an issue at \
+- https://github.com/rust-lang/rust-bindgen/issues/new"
++ https://github.com/rust-lang/rust-bindgen/issues/new"
+ );
+ }
+diff --git a/third_party/rust/bindgen/src/options.rs b/third_party/rust/bindgen/src/options.rs
+--- a/third_party/rust/bindgen/src/options.rs
++++ b/third_party/rust/bindgen/src/options.rs
+@@ -1,14 +1,12 @@
+-use bindgen::{Builder, CodegenConfig, RUST_TARGET_STRINGS, RustTarget, builder, EnumVariation};
++use bindgen::{builder, Builder, CodegenConfig, EnumVariation, RustTarget, RUST_TARGET_STRINGS};
+ use clap::{App, Arg};
+ use std::fs::File;
+-use std::io::{self, Error, ErrorKind, Write, stderr};
++use std::io::{self, stderr, Error, ErrorKind, Write};
+ use std::path::PathBuf;
+ use std::str::FromStr;
+
+ /// Construct a new [`Builder`](./struct.Builder.html) from command line flags.
+-pub fn builder_from_flags<I>(
+- args: I,
+-) -> Result<(Builder, Box<io::Write>, bool), io::Error>
++pub fn builder_from_flags<I>(args: I) -> Result<(Builder, Box<dyn io::Write>, bool), io::Error>
+ where
+ I: Iterator<Item = String>,
+ {
+@@ -31,12 +29,20 @@
+ .help("The default style of code used to generate enums.")
+ .value_name("variant")
+ .default_value("consts")
+- .possible_values(&["consts", "moduleconsts", "bitfield", "rust", "rust_non_exhaustive"])
++ .possible_values(&[
++ "consts",
++ "moduleconsts",
++ "bitfield",
++ "rust",
++ "rust_non_exhaustive",
++ ])
+ .multiple(false),
+ Arg::with_name("bitfield-enum")
+ .long("bitfield-enum")
+- .help("Mark any enum whose name matches <regex> as a set of \
+- bitfield flags.")
++ .help(
++ "Mark any enum whose name matches <regex> as a set of \
++ bitfield flags.",
++ )
+ .value_name("regex")
+ .takes_value(true)
+ .multiple(true)
+@@ -50,16 +56,20 @@
+ .number_of_values(1),
+ Arg::with_name("constified-enum")
+ .long("constified-enum")
+- .help("Mark any enum whose name matches <regex> as a series of \
+- constants.")
++ .help(
++ "Mark any enum whose name matches <regex> as a series of \
++ constants.",
++ )
+ .value_name("regex")
+ .takes_value(true)
+ .multiple(true)
+ .number_of_values(1),
+ Arg::with_name("constified-enum-module")
+ .long("constified-enum-module")
+- .help("Mark any enum whose name matches <regex> as a module of \
+- constants.")
++ .help(
++ "Mark any enum whose name matches <regex> as a module of \
++ constants.",
++ )
+ .value_name("regex")
+ .takes_value(true)
+ .multiple(true)
+@@ -98,14 +108,16 @@
+ .long("no-derive-default")
+ .hidden(true)
+ .help("Avoid deriving Default on any type."),
+- Arg::with_name("impl-debug")
+- .long("impl-debug")
+- .help("Create Debug implementation, if it can not be derived \
+- automatically."),
++ Arg::with_name("impl-debug").long("impl-debug").help(
++ "Create Debug implementation, if it can not be derived \
++ automatically.",
++ ),
+ Arg::with_name("impl-partialeq")
+ .long("impl-partialeq")
+- .help("Create PartialEq implementation, if it can not be derived \
+- automatically."),
++ .help(
++ "Create PartialEq implementation, if it can not be derived \
++ automatically.",
++ ),
+ Arg::with_name("with-derive-default")
+ .long("with-derive-default")
+ .help("Derive Default on any type."),
+@@ -120,22 +132,30 @@
+ .help("Derive partialord on any type."),
+ Arg::with_name("with-derive-eq")
+ .long("with-derive-eq")
+- .help("Derive eq on any type. Enable this option also \
+- enables --with-derive-partialeq"),
++ .help(
++ "Derive eq on any type. Enable this option also \
++ enables --with-derive-partialeq",
++ ),
+ Arg::with_name("with-derive-ord")
+ .long("with-derive-ord")
+- .help("Derive ord on any type. Enable this option also \
+- enables --with-derive-partialord"),
++ .help(
++ "Derive ord on any type. Enable this option also \
++ enables --with-derive-partialord",
++ ),
+ Arg::with_name("no-doc-comments")
+ .long("no-doc-comments")
+- .help("Avoid including doc comments in the output, see: \
+- https://github.com/rust-lang/rust-bindgen/issues/426"),
++ .help(
++ "Avoid including doc comments in the output, see: \
++ https://github.com/rust-lang/rust-bindgen/issues/426",
++ ),
+ Arg::with_name("no-recursive-whitelist")
+ .long("no-recursive-whitelist")
+- .help("Disable whitelisting types recursively. This will cause \
+- bindgen to emit Rust code that won't compile! See the \
+- `bindgen::Builder::whitelist_recursively` method's \
+- documentation for details."),
++ .help(
++ "Disable whitelisting types recursively. This will cause \
++ bindgen to emit Rust code that won't compile! See the \
++ `bindgen::Builder::whitelist_recursively` method's \
++ documentation for details.",
++ ),
+ Arg::with_name("objc-extern-crate")
+ .long("objc-extern-crate")
+ .help("Use extern crate instead of use for objc."),
+@@ -148,23 +168,23 @@
+ Arg::with_name("distrust-clang-mangling")
+ .long("distrust-clang-mangling")
+ .help("Do not trust the libclang-provided mangling"),
+- Arg::with_name("builtins")
+- .long("builtins")
+- .help("Output bindings for builtin definitions, e.g. \
+- __builtin_va_list."),
++ Arg::with_name("builtins").long("builtins").help(
++ "Output bindings for builtin definitions, e.g. \
++ __builtin_va_list.",
++ ),
+ Arg::with_name("ctypes-prefix")
+ .long("ctypes-prefix")
+- .help("Use the given prefix before raw types instead of \
+- ::std::os::raw.")
++ .help(
++ "Use the given prefix before raw types instead of \
++ ::std::os::raw.",
++ )
+ .value_name("prefix")
+ .takes_value(true),
+ Arg::with_name("time-phases")
+ .long("time-phases")
+ .help("Time the different bindgen phases and print to stderr"),
+ // All positional arguments after the end of options marker, `--`
+- Arg::with_name("clang-args")
+- .last(true)
+- .multiple(true),
++ Arg::with_name("clang-args").last(true).multiple(true),
+ Arg::with_name("emit-clang-ast")
+ .long("emit-clang-ast")
+ .help("Output the Clang AST for debugging purposes."),
+@@ -181,18 +201,24 @@
+ .help("Enable support for C++ namespaces."),
+ Arg::with_name("disable-name-namespacing")
+ .long("disable-name-namespacing")
+- .help("Disable namespacing via mangling, causing bindgen to \
+- generate names like \"Baz\" instead of \"foo_bar_Baz\" \
+- for an input name \"foo::bar::Baz\"."),
++ .help(
++ "Disable namespacing via mangling, causing bindgen to \
++ generate names like \"Baz\" instead of \"foo_bar_Baz\" \
++ for an input name \"foo::bar::Baz\".",
++ ),
+ Arg::with_name("ignore-functions")
+ .long("ignore-functions")
+- .help("Do not generate bindings for functions or methods. This \
+- is useful when you only care about struct layouts."),
++ .help(
++ "Do not generate bindings for functions or methods. This \
++ is useful when you only care about struct layouts.",
++ ),
+ Arg::with_name("generate")
+ .long("generate")
+- .help("Generate only given items, split by commas. \
+- Valid values are \"functions\",\"types\", \"vars\", \
+- \"methods\", \"constructors\" and \"destructors\".")
++ .help(
++ "Generate only given items, split by commas. \
++ Valid values are \"functions\",\"types\", \"vars\", \
++ \"methods\", \"constructors\" and \"destructors\".",
++ )
+ .takes_value(true),
+ Arg::with_name("ignore-methods")
+ .long("ignore-methods")
+@@ -237,16 +263,20 @@
+ .help("Use types from Rust core instead of std."),
+ Arg::with_name("conservative-inline-namespaces")
+ .long("conservative-inline-namespaces")
+- .help("Conservatively generate inline namespaces to avoid name \
+- conflicts."),
++ .help(
++ "Conservatively generate inline namespaces to avoid name \
++ conflicts.",
++ ),
+ Arg::with_name("use-msvc-mangling")
+ .long("use-msvc-mangling")
+ .help("MSVC C++ ABI mangling. DEPRECATED: Has no effect."),
+ Arg::with_name("whitelist-function")
+ .long("whitelist-function")
+- .help("Whitelist all the free-standing functions matching \
+- <regex>. Other non-whitelisted functions will not be \
+- generated.")
++ .help(
++ "Whitelist all the free-standing functions matching \
++ <regex>. Other non-whitelisted functions will not be \
++ generated.",
++ )
+ .value_name("regex")
+ .takes_value(true)
+ .multiple(true)
+@@ -256,17 +286,21 @@
+ .help("Generate inline functions."),
+ Arg::with_name("whitelist-type")
+ .long("whitelist-type")
+- .help("Only generate types matching <regex>. Other non-whitelisted types will \
+- not be generated.")
++ .help(
++ "Only generate types matching <regex>. Other non-whitelisted types will \
++ not be generated.",
++ )
+ .value_name("regex")
+ .takes_value(true)
+ .multiple(true)
+ .number_of_values(1),
+ Arg::with_name("whitelist-var")
+ .long("whitelist-var")
+- .help("Whitelist all the free-standing variables matching \
+- <regex>. Other non-whitelisted variables will not be \
+- generated.")
++ .help(
++ "Whitelist all the free-standing variables matching \
++ <regex>. Other non-whitelisted variables will not be \
++ generated.",
++ )
+ .value_name("regex")
+ .takes_value(true)
+ .multiple(true)
+@@ -276,27 +310,35 @@
+ .help("Print verbose error messages."),
+ Arg::with_name("dump-preprocessed-input")
+ .long("dump-preprocessed-input")
+- .help("Preprocess and dump the input header files to disk. \
+- Useful when debugging bindgen, using C-Reduce, or when \
+- filing issues. The resulting file will be named \
+- something like `__bindgen.i` or `__bindgen.ii`."),
++ .help(
++ "Preprocess and dump the input header files to disk. \
++ Useful when debugging bindgen, using C-Reduce, or when \
++ filing issues. The resulting file will be named \
++ something like `__bindgen.i` or `__bindgen.ii`.",
++ ),
+ Arg::with_name("no-record-matches")
+ .long("no-record-matches")
+- .help("Do not record matching items in the regex sets. \
+- This disables reporting of unused items."),
++ .help(
++ "Do not record matching items in the regex sets. \
++ This disables reporting of unused items.",
++ ),
+ Arg::with_name("no-rustfmt-bindings")
+ .long("no-rustfmt-bindings")
+ .help("Do not format the generated bindings with rustfmt."),
+ Arg::with_name("rustfmt-bindings")
+ .long("rustfmt-bindings")
+- .help("Format the generated bindings with rustfmt. DEPRECATED: \
+- --rustfmt-bindings is now enabled by default. Disable \
+- with --no-rustfmt-bindings."),
++ .help(
++ "Format the generated bindings with rustfmt. DEPRECATED: \
++ --rustfmt-bindings is now enabled by default. Disable \
++ with --no-rustfmt-bindings.",
++ ),
+ Arg::with_name("rustfmt-configuration-file")
+ .long("rustfmt-configuration-file")
+- .help("The absolute path to the rustfmt configuration file. \
+- The configuration file will be used for formatting the bindings. \
+- This parameter is incompatible with --no-rustfmt-bindings.")
++ .help(
++ "The absolute path to the rustfmt configuration file. \
++ The configuration file will be used for formatting the bindings. \
++ This parameter is incompatible with --no-rustfmt-bindings.",
++ )
+ .value_name("path")
+ .takes_value(true)
+ .multiple(false)
+@@ -324,8 +366,10 @@
+ .number_of_values(1),
+ Arg::with_name("enable-function-attribute-detection")
+ .long("enable-function-attribute-detection")
+- .help("Enables detecting unexposed attributes in functions (slow).
+- Used to generate #[must_use] annotations."),
++ .help(
++ "Enables detecting unexposed attributes in functions (slow).
++ Used to generate #[must_use] annotations.",
++ ),
+ Arg::with_name("use-array-pointers-in-arguments")
+ .long("use-array-pointers-in-arguments")
+ .help("Use `*const [T; size]` instead of `*const T` for C arrays"),
+@@ -345,7 +389,8 @@
+ writeln!(
+ &mut stderr(),
+ "warning: the `--unstable-rust` option is deprecated"
+- ).expect("Unable to write error message");
++ )
++ .expect("Unable to write error message");
+ }
+
+ if let Some(rust_target) = matches.value_of("rust-target") {
+@@ -600,9 +645,9 @@
+
+ let output = if let Some(path) = matches.value_of("output") {
+ let file = File::create(path)?;
+- Box::new(io::BufWriter::new(file)) as Box<io::Write>
++ Box::new(io::BufWriter::new(file)) as Box<dyn io::Write>
+ } else {
+- Box::new(io::BufWriter::new(io::stdout())) as Box<io::Write>
++ Box::new(io::BufWriter::new(io::stdout())) as Box<dyn io::Write>
+ };
+
+ if matches.is_present("dump-preprocessed-input") {
+@@ -624,7 +669,7 @@
+ if no_rustfmt_bindings {
+ return Err(Error::new(
+ ErrorKind::Other,
+- "Cannot supply both --rustfmt-configuration-file and --no-rustfmt-bindings"
++ "Cannot supply both --rustfmt-configuration-file and --no-rustfmt-bindings",
+ ));
+ }
+
+diff --git a/third_party/rust/bindgen/src/time.rs b/third_party/rust/bindgen/src/time.rs
+--- a/third_party/rust/bindgen/src/time.rs
++++ b/third_party/rust/bindgen/src/time.rs
+@@ -1,6 +1,5 @@
+ use std::io::{self, Write};
+-use std::time::{Instant, Duration};
+-
++use std::time::{Duration, Instant};
+
+ /// RAII timer to measure how long phases take.
+ #[derive(Debug)]
+@@ -10,7 +9,6 @@
+ start: Instant,
+ }
+
+-
+ impl<'a> Timer<'a> {
+ /// Creates a Timer with the given name, and starts it. By default,
+ /// will print to stderr when it is `drop`'d
+@@ -18,7 +16,7 @@
+ Timer {
+ output: true,
+ name,
+- start: Instant::now()
++ start: Instant::now(),
+ }
+ }
+
+@@ -37,19 +35,16 @@
+ fn print_elapsed(&mut self) {
+ if self.output {
+ let elapsed = self.elapsed();
+- let time = (elapsed.as_secs() as f64) * 1e3
+- + (elapsed.subsec_nanos() as f64) / 1e6;
++ let time = (elapsed.as_secs() as f64) * 1e3 +
++ (elapsed.subsec_nanos() as f64) / 1e6;
+ let stderr = io::stderr();
+ // Arbitrary output format, subject to change.
+- writeln!(stderr.lock(),
+- " time: {:>9.3} ms.\t{}",
+- time, self.name)
+- .expect("timer write should not fail");
++ writeln!(stderr.lock(), " time: {:>9.3} ms.\t{}", time, self.name)
++ .expect("timer write should not fail");
+ }
+ }
+ }
+
+-
+ impl<'a> Drop for Timer<'a> {
+ fn drop(&mut self) {
+ self.print_elapsed();
+diff --git a/third_party/rust/proc-macro2-0.4.27/.cargo-checksum.json b/third_party/rust/proc-macro2-0.4.27/.cargo-checksum.json
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/.cargo-checksum.json
+@@ -0,0 +1 @@
++{"files":{"Cargo.toml":"b523856472549844b4bf20eca0473d955a7e5eeb95c70eddd31a05ac455427bb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"89857eaaa305afe540abcf56fabae0194dfb4e7906a8098b7206acb23ed11ce8","build.rs":"36fa668f3bf309f243d0e977e8428446cc424303139c1f63410b3c2e30445aec","src/fallback.rs":"e4d1bcb1e92383a2285e6c947dd74b0e34144904948db68127faea627f5dd6ff","src/lib.rs":"896a1d212e30902ff051313808007406ca4471c27880a6ef19508f0ebb8333ee","src/strnom.rs":"60f5380106dbe568cca7abd09877e133c874fbee95d502e4830425c4613a640d","src/wrapper.rs":"0d7fe28ab2b7ee02b8eb8c5a636da364c60f6704b23e7db0a1ddd57c742f54b1","tests/marker.rs":"0227d07bbc7f2e2ad34662a6acb65668b7dc2f79141c4faa672703a04e27bea0","tests/test.rs":"166d35835355bdaa85bcf69de4dfb56ccddd8acf2e1a8cbc506782632b151674"},"package":"4d317f9caece796be1980837fd5cb3dfec5613ebdb04ad0956deea83ce168915"}
+\ No newline at end of file
+diff --git a/third_party/rust/proc-macro2-0.4.27/Cargo.toml b/third_party/rust/proc-macro2-0.4.27/Cargo.toml
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/Cargo.toml
+@@ -0,0 +1,39 @@
++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
++#
++# When uploading crates to the registry Cargo will automatically
++# "normalize" Cargo.toml files for maximal compatibility
++# with all versions of Cargo and also rewrite `path` dependencies
++# to registry (e.g. crates.io) dependencies
++#
++# If you believe there's an error in this file please file an
++# issue against the rust-lang/cargo repository. If you're
++# editing this file be aware that the upstream Cargo.toml
++# will likely look very different (and much more reasonable)
++
++[package]
++name = "proc-macro2"
++version = "0.4.27"
++authors = ["Alex Crichton <alex@alexcrichton.com>"]
++build = "build.rs"
++description = "A stable implementation of the upcoming new `proc_macro` API. Comes with an\noption, off by default, to also reimplement itself in terms of the upstream\nunstable API.\n"
++homepage = "https://github.com/alexcrichton/proc-macro2"
++documentation = "https://docs.rs/proc-macro2"
++readme = "README.md"
++keywords = ["macros"]
++license = "MIT/Apache-2.0"
++repository = "https://github.com/alexcrichton/proc-macro2"
++[package.metadata.docs.rs]
++rustc-args = ["--cfg", "procmacro2_semver_exempt"]
++rustdoc-args = ["--cfg", "procmacro2_semver_exempt"]
++[dependencies.unicode-xid]
++version = "0.1"
++[dev-dependencies.quote]
++version = "0.6"
++
++[features]
++default = ["proc-macro"]
++nightly = []
++proc-macro = []
++span-locations = []
++[badges.travis-ci]
++repository = "alexcrichton/proc-macro2"
+diff --git a/third_party/rust/proc-macro2-0.4.27/LICENSE-APACHE b/third_party/rust/proc-macro2-0.4.27/LICENSE-APACHE
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/LICENSE-APACHE
+@@ -0,0 +1,201 @@
++ Apache License
++ Version 2.0, January 2004
++ http://www.apache.org/licenses/
++
++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
++
++1. Definitions.
++
++ "License" shall mean the terms and conditions for use, reproduction,
++ and distribution as defined by Sections 1 through 9 of this document.
++
++ "Licensor" shall mean the copyright owner or entity authorized by
++ the copyright owner that is granting the License.
++
++ "Legal Entity" shall mean the union of the acting entity and all
++ other entities that control, are controlled by, or are under common
++ control with that entity. For the purposes of this definition,
++ "control" means (i) the power, direct or indirect, to cause the
++ direction or management of such entity, whether by contract or
++ otherwise, or (ii) ownership of fifty percent (50%) or more of the
++ outstanding shares, or (iii) beneficial ownership of such entity.
++
++ "You" (or "Your") shall mean an individual or Legal Entity
++ exercising permissions granted by this License.
++
++ "Source" form shall mean the preferred form for making modifications,
++ including but not limited to software source code, documentation
++ source, and configuration files.
++
++ "Object" form shall mean any form resulting from mechanical
++ transformation or translation of a Source form, including but
++ not limited to compiled object code, generated documentation,
++ and conversions to other media types.
++
++ "Work" shall mean the work of authorship, whether in Source or
++ Object form, made available under the License, as indicated by a
++ copyright notice that is included in or attached to the work
++ (an example is provided in the Appendix below).
++
++ "Derivative Works" shall mean any work, whether in Source or Object
++ form, that is based on (or derived from) the Work and for which the
++ editorial revisions, annotations, elaborations, or other modifications
++ represent, as a whole, an original work of authorship. For the purposes
++ of this License, Derivative Works shall not include works that remain
++ separable from, or merely link (or bind by name) to the interfaces of,
++ the Work and Derivative Works thereof.
++
++ "Contribution" shall mean any work of authorship, including
++ the original version of the Work and any modifications or additions
++ to that Work or Derivative Works thereof, that is intentionally
++ submitted to Licensor for inclusion in the Work by the copyright owner
++ or by an individual or Legal Entity authorized to submit on behalf of
++ the copyright owner. For the purposes of this definition, "submitted"
++ means any form of electronic, verbal, or written communication sent
++ to the Licensor or its representatives, including but not limited to
++ communication on electronic mailing lists, source code control systems,
++ and issue tracking systems that are managed by, or on behalf of, the
++ Licensor for the purpose of discussing and improving the Work, but
++ excluding communication that is conspicuously marked or otherwise
++ designated in writing by the copyright owner as "Not a Contribution."
++
++ "Contributor" shall mean Licensor and any individual or Legal Entity
++ on behalf of whom a Contribution has been received by Licensor and
++ subsequently incorporated within the Work.
++
++2. Grant of Copyright License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ copyright license to reproduce, prepare Derivative Works of,
++ publicly display, publicly perform, sublicense, and distribute the
++ Work and such Derivative Works in Source or Object form.
++
++3. Grant of Patent License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ (except as stated in this section) patent license to make, have made,
++ use, offer to sell, sell, import, and otherwise transfer the Work,
++ where such license applies only to those patent claims licensable
++ by such Contributor that are necessarily infringed by their
++ Contribution(s) alone or by combination of their Contribution(s)
++ with the Work to which such Contribution(s) was submitted. If You
++ institute patent litigation against any entity (including a
++ cross-claim or counterclaim in a lawsuit) alleging that the Work
++ or a Contribution incorporated within the Work constitutes direct
++ or contributory patent infringement, then any patent licenses
++ granted to You under this License for that Work shall terminate
++ as of the date such litigation is filed.
++
++4. Redistribution. You may reproduce and distribute copies of the
++ Work or Derivative Works thereof in any medium, with or without
++ modifications, and in Source or Object form, provided that You
++ meet the following conditions:
++
++ (a) You must give any other recipients of the Work or
++ Derivative Works a copy of this License; and
++
++ (b) You must cause any modified files to carry prominent notices
++ stating that You changed the files; and
++
++ (c) You must retain, in the Source form of any Derivative Works
++ that You distribute, all copyright, patent, trademark, and
++ attribution notices from the Source form of the Work,
++ excluding those notices that do not pertain to any part of
++ the Derivative Works; and
++
++ (d) If the Work includes a "NOTICE" text file as part of its
++ distribution, then any Derivative Works that You distribute must
++ include a readable copy of the attribution notices contained
++ within such NOTICE file, excluding those notices that do not
++ pertain to any part of the Derivative Works, in at least one
++ of the following places: within a NOTICE text file distributed
++ as part of the Derivative Works; within the Source form or
++ documentation, if provided along with the Derivative Works; or,
++ within a display generated by the Derivative Works, if and
++ wherever such third-party notices normally appear. The contents
++ of the NOTICE file are for informational purposes only and
++ do not modify the License. You may add Your own attribution
++ notices within Derivative Works that You distribute, alongside
++ or as an addendum to the NOTICE text from the Work, provided
++ that such additional attribution notices cannot be construed
++ as modifying the License.
++
++ You may add Your own copyright statement to Your modifications and
++ may provide additional or different license terms and conditions
++ for use, reproduction, or distribution of Your modifications, or
++ for any such Derivative Works as a whole, provided Your use,
++ reproduction, and distribution of the Work otherwise complies with
++ the conditions stated in this License.
++
++5. Submission of Contributions. Unless You explicitly state otherwise,
++ any Contribution intentionally submitted for inclusion in the Work
++ by You to the Licensor shall be under the terms and conditions of
++ this License, without any additional terms or conditions.
++ Notwithstanding the above, nothing herein shall supersede or modify
++ the terms of any separate license agreement you may have executed
++ with Licensor regarding such Contributions.
++
++6. Trademarks. This License does not grant permission to use the trade
++ names, trademarks, service marks, or product names of the Licensor,
++ except as required for reasonable and customary use in describing the
++ origin of the Work and reproducing the content of the NOTICE file.
++
++7. Disclaimer of Warranty. Unless required by applicable law or
++ agreed to in writing, Licensor provides the Work (and each
++ Contributor provides its Contributions) on an "AS IS" BASIS,
++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
++ implied, including, without limitation, any warranties or conditions
++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
++ PARTICULAR PURPOSE. You are solely responsible for determining the
++ appropriateness of using or redistributing the Work and assume any
++ risks associated with Your exercise of permissions under this License.
++
++8. Limitation of Liability. In no event and under no legal theory,
++ whether in tort (including negligence), contract, or otherwise,
++ unless required by applicable law (such as deliberate and grossly
++ negligent acts) or agreed to in writing, shall any Contributor be
++ liable to You for damages, including any direct, indirect, special,
++ incidental, or consequential damages of any character arising as a
++ result of this License or out of the use or inability to use the
++ Work (including but not limited to damages for loss of goodwill,
++ work stoppage, computer failure or malfunction, or any and all
++ other commercial damages or losses), even if such Contributor
++ has been advised of the possibility of such damages.
++
++9. Accepting Warranty or Additional Liability. While redistributing
++ the Work or Derivative Works thereof, You may choose to offer,
++ and charge a fee for, acceptance of support, warranty, indemnity,
++ or other liability obligations and/or rights consistent with this
++ License. However, in accepting such obligations, You may act only
++ on Your own behalf and on Your sole responsibility, not on behalf
++ of any other Contributor, and only if You agree to indemnify,
++ defend, and hold each Contributor harmless for any liability
++ incurred by, or claims asserted against, such Contributor by reason
++ of your accepting any such warranty or additional liability.
++
++END OF TERMS AND CONDITIONS
++
++APPENDIX: How to apply the Apache License to your work.
++
++ To apply the Apache License to your work, attach the following
++ boilerplate notice, with the fields enclosed by brackets "[]"
++ replaced with your own identifying information. (Don't include
++ the brackets!) The text should be enclosed in the appropriate
++ comment syntax for the file format. We also recommend that a
++ file or class name and description of purpose be included on the
++ same "printed page" as the copyright notice for easier
++ identification within third-party archives.
++
++Copyright [yyyy] [name of copyright owner]
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
+diff --git a/third_party/rust/proc-macro2-0.4.27/LICENSE-MIT b/third_party/rust/proc-macro2-0.4.27/LICENSE-MIT
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/LICENSE-MIT
+@@ -0,0 +1,25 @@
++Copyright (c) 2014 Alex Crichton
++
++Permission is hereby granted, free of charge, to any
++person obtaining a copy of this software and associated
++documentation files (the "Software"), to deal in the
++Software without restriction, including without
++limitation the rights to use, copy, modify, merge,
++publish, distribute, sublicense, and/or sell copies of
++the Software, and to permit persons to whom the Software
++is furnished to do so, subject to the following
++conditions:
++
++The above copyright notice and this permission notice
++shall be included in all copies or substantial portions
++of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
+diff --git a/third_party/rust/proc-macro2-0.4.27/README.md b/third_party/rust/proc-macro2-0.4.27/README.md
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/README.md
+@@ -0,0 +1,100 @@
++# proc-macro2
++
++[![Build Status](https://api.travis-ci.com/alexcrichton/proc-macro2.svg?branch=master)](https://travis-ci.com/alexcrichton/proc-macro2)
++[![Latest Version](https://img.shields.io/crates/v/proc-macro2.svg)](https://crates.io/crates/proc-macro2)
++[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/proc-macro2)
++
++A wrapper around the procedural macro API of the compiler's `proc_macro` crate.
++This library serves three purposes:
++
++- **Bring proc-macro-like functionality to other contexts like build.rs and
++ main.rs.** Types from `proc_macro` are entirely specific to procedural macros
++ and cannot ever exist in code outside of a procedural macro. Meanwhile
++ `proc_macro2` types may exist anywhere including non-macro code. By developing
++ foundational libraries like [syn] and [quote] against `proc_macro2` rather
++ than `proc_macro`, the procedural macro ecosystem becomes easily applicable to
++ many other use cases and we avoid reimplementing non-macro equivalents of
++ those libraries.
++
++- **Make procedural macros unit testable.** As a consequence of being specific
++ to procedural macros, nothing that uses `proc_macro` can be executed from a
++ unit test. In order for helper libraries or components of a macro to be
++ testable in isolation, they must be implemented using `proc_macro2`.
++
++- **Provide the latest and greatest APIs across all compiler versions.**
++ Procedural macros were first introduced to Rust in 1.15.0 with an extremely
++ minimal interface. Since then, many improvements have landed to make macros
++ more flexible and easier to write. This library tracks the procedural macro
++ API of the most recent stable compiler but employs a polyfill to provide that
++ API consistently across any compiler since 1.15.0.
++
++[syn]: https://github.com/dtolnay/syn
++[quote]: https://github.com/dtolnay/quote
++
++## Usage
++
++```toml
++[dependencies]
++proc-macro2 = "0.4"
++```
++
++The skeleton of a typical procedural macro typically looks like this:
++
++```rust
++extern crate proc_macro;
++
++#[proc_macro_derive(MyDerive)]
++pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
++ let input = proc_macro2::TokenStream::from(input);
++
++ let output: proc_macro2::TokenStream = {
++ /* transform input */
++ };
++
++ proc_macro::TokenStream::from(output)
++}
++```
++
++If parsing with [Syn], you'll use [`parse_macro_input!`] instead to propagate
++parse errors correctly back to the compiler when parsing fails.
++
++[`parse_macro_input!`]: https://docs.rs/syn/0.15/syn/macro.parse_macro_input.html
++
++## Unstable features
++
++The default feature set of proc-macro2 tracks the most recent stable compiler
++API. Functionality in `proc_macro` that is not yet stable is not exposed by
++proc-macro2 by default.
++
++To opt into the additional APIs available in the most recent nightly compiler,
++the `procmacro2_semver_exempt` config flag must be passed to rustc. As usual, we
++will polyfill those nightly-only APIs all the way back to Rust 1.15.0. As these
++are unstable APIs that track the nightly compiler, minor versions of proc-macro2
++may make breaking changes to them at any time.
++
++```
++RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build
++```
++
++Note that this must not only be done for your crate, but for any crate that
++depends on your crate. This infectious nature is intentional, as it serves as a
++reminder that you are outside of the normal semver guarantees.
++
++Semver exempt methods are marked as such in the proc-macro2 documentation.
++
++# License
++
++This project is licensed under either of
++
++ * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
++ http://www.apache.org/licenses/LICENSE-2.0)
++ * MIT license ([LICENSE-MIT](LICENSE-MIT) or
++ http://opensource.org/licenses/MIT)
++
++at your option.
++
++### Contribution
++
++Unless you explicitly state otherwise, any contribution intentionally submitted
++for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be
++dual licensed as above, without any additional terms or conditions.
+diff --git a/third_party/rust/proc-macro2-0.4.27/build.rs b/third_party/rust/proc-macro2-0.4.27/build.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/build.rs
+@@ -0,0 +1,133 @@
++// rustc-cfg emitted by the build script:
++//
++// "u128"
++// Include u128 and i128 constructors for proc_macro2::Literal. Enabled on
++// any compiler 1.26+.
++//
++// "use_proc_macro"
++// Link to extern crate proc_macro. Available on any compiler and any target
++// except wasm32. Requires "proc-macro" Cargo cfg to be enabled (default is
++// enabled). On wasm32 we never link to proc_macro even if "proc-macro" cfg
++// is enabled.
++//
++// "wrap_proc_macro"
++// Wrap types from libproc_macro rather than polyfilling the whole API.
++// Enabled on rustc 1.29+ as long as procmacro2_semver_exempt is not set,
++// because we can't emulate the unstable API without emulating everything
++// else. Also enabled unconditionally on nightly, in which case the
++// procmacro2_semver_exempt surface area is implemented by using the
++// nightly-only proc_macro API.
++//
++// "slow_extend"
++// Fallback when `impl Extend for TokenStream` is not available. These impls
++// were added one version later than the rest of the proc_macro token API.
++// Enabled on rustc 1.29 only.
++//
++// "nightly"
++// Enable the Span::unwrap method. This is to support proc_macro_span and
++// proc_macro_diagnostic use on the nightly channel without requiring the
++// semver exemption opt-in. Enabled when building with nightly.
++//
++// "super_unstable"
++// Implement the semver exempt API in terms of the nightly-only proc_macro
++// API. Enabled when using procmacro2_semver_exempt on a nightly compiler.
++//
++// "span_locations"
++// Provide methods Span::start and Span::end which give the line/column
++// location of a token. Enabled by procmacro2_semver_exempt or the
++// "span-locations" Cargo cfg. This is behind a cfg because tracking
++// location inside spans is a performance hit.
++
++use std::env;
++use std::process::Command;
++use std::str;
++
++fn main() {
++ println!("cargo:rerun-if-changed=build.rs");
++
++ let target = env::var("TARGET").unwrap();
++
++ let version = match rustc_version() {
++ Some(version) => version,
++ None => return,
++ };
++
++ if version.minor >= 26 {
++ println!("cargo:rustc-cfg=u128");
++ }
++
++ let semver_exempt = cfg!(procmacro2_semver_exempt);
++ if semver_exempt {
++ // https://github.com/alexcrichton/proc-macro2/issues/147
++ println!("cargo:rustc-cfg=procmacro2_semver_exempt");
++ }
++
++ if semver_exempt || cfg!(feature = "span-locations") {
++ println!("cargo:rustc-cfg=span_locations");
++ }
++
++ if !enable_use_proc_macro(&target) {
++ return;
++ }
++
++ println!("cargo:rustc-cfg=use_proc_macro");
++
++ // Rust 1.29 stabilized the necessary APIs in the `proc_macro` crate
++ if version.nightly || version.minor >= 29 && !semver_exempt {
++ println!("cargo:rustc-cfg=wrap_proc_macro");
++ }
++
++ if version.minor == 29 {
++ println!("cargo:rustc-cfg=slow_extend");
++ }
++
++ if version.nightly {
++ println!("cargo:rustc-cfg=nightly");
++ }
++
++ if semver_exempt && version.nightly {
++ println!("cargo:rustc-cfg=super_unstable");
++ }
++}
++
++fn enable_use_proc_macro(target: &str) -> bool {
++ // wasm targets don't have the `proc_macro` crate, disable this feature.
++ if target.contains("wasm32") {
++ return false;
++ }
++
++ // Otherwise, only enable it if our feature is actually enabled.
++ cfg!(feature = "proc-macro")
++}
++
++struct RustcVersion {
++ minor: u32,
++ nightly: bool,
++}
++
++fn rustc_version() -> Option<RustcVersion> {
++ macro_rules! otry {
++ ($e:expr) => {
++ match $e {
++ Some(e) => e,
++ None => return None,
++ }
++ };
++ }
++
++ let rustc = otry!(env::var_os("RUSTC"));
++ let output = otry!(Command::new(rustc).arg("--version").output().ok());
++ let version = otry!(str::from_utf8(&output.stdout).ok());
++ let nightly = version.contains("nightly");
++ let mut pieces = version.split('.');
++ if pieces.next() != Some("rustc 1") {
++ return None;
++ }
++ let minor = otry!(pieces.next());
++ let minor = otry!(minor.parse().ok());
++
++ Some(RustcVersion {
++ minor: minor,
++ nightly: nightly,
++ })
++}
+diff --git a/third_party/rust/proc-macro2-0.4.27/src/fallback.rs b/third_party/rust/proc-macro2-0.4.27/src/fallback.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/src/fallback.rs
+@@ -0,0 +1,1421 @@
++#[cfg(span_locations)]
++use std::cell::RefCell;
++#[cfg(procmacro2_semver_exempt)]
++use std::cmp;
++use std::fmt;
++use std::iter;
++#[cfg(procmacro2_semver_exempt)]
++use std::path::Path;
++use std::path::PathBuf;
++use std::str::FromStr;
++use std::vec;
++
++use strnom::{block_comment, skip_whitespace, whitespace, word_break, Cursor, PResult};
++use unicode_xid::UnicodeXID;
++
++use {Delimiter, Punct, Spacing, TokenTree};
++
++#[derive(Clone)]
++pub struct TokenStream {
++ inner: Vec<TokenTree>,
++}
++
++#[derive(Debug)]
++pub struct LexError;
++
++impl TokenStream {
++ pub fn new() -> TokenStream {
++ TokenStream { inner: Vec::new() }
++ }
++
++ pub fn is_empty(&self) -> bool {
++ self.inner.len() == 0
++ }
++}
++
++#[cfg(span_locations)]
++fn get_cursor(src: &str) -> Cursor {
++ // Create a dummy file & add it to the codemap
++ CODEMAP.with(|cm| {
++ let mut cm = cm.borrow_mut();
++ let name = format!("<parsed string {}>", cm.files.len());
++ let span = cm.add_file(&name, src);
++ Cursor {
++ rest: src,
++ off: span.lo,
++ }
++ })
++}
++
++#[cfg(not(span_locations))]
++fn get_cursor(src: &str) -> Cursor {
++ Cursor { rest: src }
++}
++
++impl FromStr for TokenStream {
++ type Err = LexError;
++
++ fn from_str(src: &str) -> Result<TokenStream, LexError> {
++ // Create a dummy file & add it to the codemap
++ let cursor = get_cursor(src);
++
++ match token_stream(cursor) {
++ Ok((input, output)) => {
++ if skip_whitespace(input).len() != 0 {
++ Err(LexError)
++ } else {
++ Ok(output)
++ }
++ }
++ Err(LexError) => Err(LexError),
++ }
++ }
++}
++
++impl fmt::Display for TokenStream {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ let mut joint = false;
++ for (i, tt) in self.inner.iter().enumerate() {
++ if i != 0 && !joint {
++ write!(f, " ")?;
++ }
++ joint = false;
++ match *tt {
++ TokenTree::Group(ref tt) => {
++ let (start, end) = match tt.delimiter() {
++ Delimiter::Parenthesis => ("(", ")"),
++ Delimiter::Brace => ("{", "}"),
++ Delimiter::Bracket => ("[", "]"),
++ Delimiter::None => ("", ""),
++ };
++ if tt.stream().into_iter().next().is_none() {
++ write!(f, "{} {}", start, end)?
++ } else {
++ write!(f, "{} {} {}", start, tt.stream(), end)?
++ }
++ }
++ TokenTree::Ident(ref tt) => write!(f, "{}", tt)?,
++ TokenTree::Punct(ref tt) => {
++ write!(f, "{}", tt.as_char())?;
++ match tt.spacing() {
++ Spacing::Alone => {}
++ Spacing::Joint => joint = true,
++ }
++ }
++ TokenTree::Literal(ref tt) => write!(f, "{}", tt)?,
++ }
++ }
++
++ Ok(())
++ }
++}
++
++impl fmt::Debug for TokenStream {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ f.write_str("TokenStream ")?;
++ f.debug_list().entries(self.clone()).finish()
++ }
++}
++
++#[cfg(use_proc_macro)]
++impl From<::proc_macro::TokenStream> for TokenStream {
++ fn from(inner: ::proc_macro::TokenStream) -> TokenStream {
++ inner
++ .to_string()
++ .parse()
++ .expect("compiler token stream parse failed")
++ }
++}
++
++#[cfg(use_proc_macro)]
++impl From<TokenStream> for ::proc_macro::TokenStream {
++ fn from(inner: TokenStream) -> ::proc_macro::TokenStream {
++ inner
++ .to_string()
++ .parse()
++ .expect("failed to parse to compiler tokens")
++ }
++}
++
++impl From<TokenTree> for TokenStream {
++ fn from(tree: TokenTree) -> TokenStream {
++ TokenStream { inner: vec![tree] }
++ }
++}
++
++impl iter::FromIterator<TokenTree> for TokenStream {
++ fn from_iter<I: IntoIterator<Item = TokenTree>>(streams: I) -> Self {
++ let mut v = Vec::new();
++
++ for token in streams.into_iter() {
++ v.push(token);
++ }
++
++ TokenStream { inner: v }
++ }
++}
++
++impl iter::FromIterator<TokenStream> for TokenStream {
++ fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self {
++ let mut v = Vec::new();
++
++ for stream in streams.into_iter() {
++ v.extend(stream.inner);
++ }
++
++ TokenStream { inner: v }
++ }
++}
++
++impl Extend<TokenTree> for TokenStream {
++ fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) {
++ self.inner.extend(streams);
++ }
++}
++
++impl Extend<TokenStream> for TokenStream {
++ fn extend<I: IntoIterator<Item = TokenStream>>(&mut self, streams: I) {
++ self.inner
++ .extend(streams.into_iter().flat_map(|stream| stream));
++ }
++}
++
++pub type TokenTreeIter = vec::IntoIter<TokenTree>;
++
++impl IntoIterator for TokenStream {
++ type Item = TokenTree;
++ type IntoIter = TokenTreeIter;
++
++ fn into_iter(self) -> TokenTreeIter {
++ self.inner.into_iter()
++ }
++}
++
++#[derive(Clone, PartialEq, Eq)]
++pub struct SourceFile {
++ path: PathBuf,
++}
++
++impl SourceFile {
++ /// Get the path to this source file as a string.
++ pub fn path(&self) -> PathBuf {
++ self.path.clone()
++ }
++
++ pub fn is_real(&self) -> bool {
++ // XXX(nika): Support real files in the future?
++ false
++ }
++}
++
++impl fmt::Debug for SourceFile {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ f.debug_struct("SourceFile")
++ .field("path", &self.path())
++ .field("is_real", &self.is_real())
++ .finish()
++ }
++}
++
++#[derive(Clone, Copy, Debug, PartialEq, Eq)]
++pub struct LineColumn {
++ pub line: usize,
++ pub column: usize,
++}
++
++#[cfg(span_locations)]
++thread_local! {
++ static CODEMAP: RefCell<Codemap> = RefCell::new(Codemap {
++ // NOTE: We start with a single dummy file which all call_site() and
++ // def_site() spans reference.
++ files: vec![{
++ #[cfg(procmacro2_semver_exempt)]
++ {
++ FileInfo {
++ name: "<unspecified>".to_owned(),
++ span: Span { lo: 0, hi: 0 },
++ lines: vec![0],
++ }
++ }
++
++ #[cfg(not(procmacro2_semver_exempt))]
++ {
++ FileInfo {
++ span: Span { lo: 0, hi: 0 },
++ lines: vec![0],
++ }
++ }
++ }],
++ });
++}
++
++#[cfg(span_locations)]
++struct FileInfo {
++ #[cfg(procmacro2_semver_exempt)]
++ name: String,
++ span: Span,
++ lines: Vec<usize>,
++}
++
++#[cfg(span_locations)]
++impl FileInfo {
++ fn offset_line_column(&self, offset: usize) -> LineColumn {
++ assert!(self.span_within(Span {
++ lo: offset as u32,
++ hi: offset as u32
++ }));
++ let offset = offset - self.span.lo as usize;
++ match self.lines.binary_search(&offset) {
++ Ok(found) => LineColumn {
++ line: found + 1,
++ column: 0,
++ },
++ Err(idx) => LineColumn {
++ line: idx,
++ column: offset - self.lines[idx - 1],
++ },
++ }
++ }
++
++ fn span_within(&self, span: Span) -> bool {
++ span.lo >= self.span.lo && span.hi <= self.span.hi
++ }
++}
++
++/// Computesthe offsets of each line in the given source string.
++#[cfg(span_locations)]
++fn lines_offsets(s: &str) -> Vec<usize> {
++ let mut lines = vec![0];
++ let mut prev = 0;
++ while let Some(len) = s[prev..].find('\n') {
++ prev += len + 1;
++ lines.push(prev);
++ }
++ lines
++}
++
++#[cfg(span_locations)]
++struct Codemap {
++ files: Vec<FileInfo>,
++}
++
++#[cfg(span_locations)]
++impl Codemap {
++ fn next_start_pos(&self) -> u32 {
++ // Add 1 so there's always space between files.
++ //
++ // We'll always have at least 1 file, as we initialize our files list
++ // with a dummy file.
++ self.files.last().unwrap().span.hi + 1
++ }
++
++ fn add_file(&mut self, name: &str, src: &str) -> Span {
++ let lines = lines_offsets(src);
++ let lo = self.next_start_pos();
++ // XXX(nika): Shouild we bother doing a checked cast or checked add here?
++ let span = Span {
++ lo: lo,
++ hi: lo + (src.len() as u32),
++ };
++
++ #[cfg(procmacro2_semver_exempt)]
++ self.files.push(FileInfo {
++ name: name.to_owned(),
++ span: span,
++ lines: lines,
++ });
++
++ #[cfg(not(procmacro2_semver_exempt))]
++ self.files.push(FileInfo {
++ span: span,
++ lines: lines,
++ });
++ let _ = name;
++
++ span
++ }
++
++ fn fileinfo(&self, span: Span) -> &FileInfo {
++ for file in &self.files {
++ if file.span_within(span) {
++ return file;
++ }
++ }
++ panic!("Invalid span with no related FileInfo!");
++ }
++}
++
++#[derive(Clone, Copy, PartialEq, Eq)]
++pub struct Span {
++ #[cfg(span_locations)]
++ lo: u32,
++ #[cfg(span_locations)]
++ hi: u32,
++}
++
++impl Span {
++ #[cfg(not(span_locations))]
++ pub fn call_site() -> Span {
++ Span {}
++ }
++
++ #[cfg(span_locations)]
++ pub fn call_site() -> Span {
++ Span { lo: 0, hi: 0 }
++ }
++
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn def_site() -> Span {
++ Span::call_site()
++ }
++
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn resolved_at(&self, _other: Span) -> Span {
++ // Stable spans consist only of line/column information, so
++ // `resolved_at` and `located_at` only select which span the
++ // caller wants line/column information from.
++ *self
++ }
++
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn located_at(&self, other: Span) -> Span {
++ other
++ }
++
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn source_file(&self) -> SourceFile {
++ CODEMAP.with(|cm| {
++ let cm = cm.borrow();
++ let fi = cm.fileinfo(*self);
++ SourceFile {
++ path: Path::new(&fi.name).to_owned(),
++ }
++ })
++ }
++
++ #[cfg(span_locations)]
++ pub fn start(&self) -> LineColumn {
++ CODEMAP.with(|cm| {
++ let cm = cm.borrow();
++ let fi = cm.fileinfo(*self);
++ fi.offset_line_column(self.lo as usize)
++ })
++ }
++
++ #[cfg(span_locations)]
++ pub fn end(&self) -> LineColumn {
++ CODEMAP.with(|cm| {
++ let cm = cm.borrow();
++ let fi = cm.fileinfo(*self);
++ fi.offset_line_column(self.hi as usize)
++ })
++ }
++
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn join(&self, other: Span) -> Option<Span> {
++ CODEMAP.with(|cm| {
++ let cm = cm.borrow();
++ // If `other` is not within the same FileInfo as us, return None.
++ if !cm.fileinfo(*self).span_within(other) {
++ return None;
++ }
++ Some(Span {
++ lo: cmp::min(self.lo, other.lo),
++ hi: cmp::max(self.hi, other.hi),
++ })
++ })
++ }
++}
++
++impl fmt::Debug for Span {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ #[cfg(procmacro2_semver_exempt)]
++ return write!(f, "bytes({}..{})", self.lo, self.hi);
++
++ #[cfg(not(procmacro2_semver_exempt))]
++ write!(f, "Span")
++ }
++}
++
++pub fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) {
++ if cfg!(procmacro2_semver_exempt) {
++ debug.field("span", &span);
++ }
++}
++
++#[derive(Clone)]
++pub struct Group {
++ delimiter: Delimiter,
++ stream: TokenStream,
++ span: Span,
++}
++
++impl Group {
++ pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group {
++ Group {
++ delimiter: delimiter,
++ stream: stream,
++ span: Span::call_site(),
++ }
++ }
++
++ pub fn delimiter(&self) -> Delimiter {
++ self.delimiter
++ }
++
++ pub fn stream(&self) -> TokenStream {
++ self.stream.clone()
++ }
++
++ pub fn span(&self) -> Span {
++ self.span
++ }
++
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn span_open(&self) -> Span {
++ self.span
++ }
++
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn span_close(&self) -> Span {
++ self.span
++ }
++
++ pub fn set_span(&mut self, span: Span) {
++ self.span = span;
++ }
++}
++
++impl fmt::Display for Group {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ let (left, right) = match self.delimiter {
++ Delimiter::Parenthesis => ("(", ")"),
++ Delimiter::Brace => ("{", "}"),
++ Delimiter::Bracket => ("[", "]"),
++ Delimiter::None => ("", ""),
++ };
++
++ f.write_str(left)?;
++ self.stream.fmt(f)?;
++ f.write_str(right)?;
++
++ Ok(())
++ }
++}
++
++impl fmt::Debug for Group {
++ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
++ let mut debug = fmt.debug_struct("Group");
++ debug.field("delimiter", &self.delimiter);
++ debug.field("stream", &self.stream);
++ #[cfg(procmacro2_semver_exempt)]
++ debug.field("span", &self.span);
++ debug.finish()
++ }
++}
++
++#[derive(Clone)]
++pub struct Ident {
++ sym: String,
++ span: Span,
++ raw: bool,
++}
++
++impl Ident {
++ fn _new(string: &str, raw: bool, span: Span) -> Ident {
++ validate_term(string);
++
++ Ident {
++ sym: string.to_owned(),
++ span: span,
++ raw: raw,
++ }
++ }
++
++ pub fn new(string: &str, span: Span) -> Ident {
++ Ident::_new(string, false, span)
++ }
++
++ pub fn new_raw(string: &str, span: Span) -> Ident {
++ Ident::_new(string, true, span)
++ }
++
++ pub fn span(&self) -> Span {
++ self.span
++ }
++
++ pub fn set_span(&mut self, span: Span) {
++ self.span = span;
++ }
++}
++
++#[inline]
++fn is_ident_start(c: char) -> bool {
++ ('a' <= c && c <= 'z')
++ || ('A' <= c && c <= 'Z')
++ || c == '_'
++ || (c > '\x7f' && UnicodeXID::is_xid_start(c))
++}
++
++#[inline]
++fn is_ident_continue(c: char) -> bool {
++ ('a' <= c && c <= 'z')
++ || ('A' <= c && c <= 'Z')
++ || c == '_'
++ || ('0' <= c && c <= '9')
++ || (c > '\x7f' && UnicodeXID::is_xid_continue(c))
++}
++
++fn validate_term(string: &str) {
++ let validate = string;
++ if validate.is_empty() {
++ panic!("Ident is not allowed to be empty; use Option<Ident>");
++ }
++
++ if validate.bytes().all(|digit| digit >= b'0' && digit <= b'9') {
++ panic!("Ident cannot be a number; use Literal instead");
++ }
++
++ fn ident_ok(string: &str) -> bool {
++ let mut chars = string.chars();
++ let first = chars.next().unwrap();
++ if !is_ident_start(first) {
++ return false;
++ }
++ for ch in chars {
++ if !is_ident_continue(ch) {
++ return false;
++ }
++ }
++ true
++ }
++
++ if !ident_ok(validate) {
++ panic!("{:?} is not a valid Ident", string);
++ }
++}
++
++impl PartialEq for Ident {
++ fn eq(&self, other: &Ident) -> bool {
++ self.sym == other.sym && self.raw == other.raw
++ }
++}
++
++impl<T> PartialEq<T> for Ident
++where
++ T: ?Sized + AsRef<str>,
++{
++ fn eq(&self, other: &T) -> bool {
++ let other = other.as_ref();
++ if self.raw {
++ other.starts_with("r#") && self.sym == other[2..]
++ } else {
++ self.sym == other
++ }
++ }
++}
++
++impl fmt::Display for Ident {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ if self.raw {
++ "r#".fmt(f)?;
++ }
++ self.sym.fmt(f)
++ }
++}
++
++impl fmt::Debug for Ident {
++ // Ident(proc_macro), Ident(r#union)
++ #[cfg(not(procmacro2_semver_exempt))]
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ let mut debug = f.debug_tuple("Ident");
++ debug.field(&format_args!("{}", self));
++ debug.finish()
++ }
++
++ // Ident {
++ // sym: proc_macro,
++ // span: bytes(128..138)
++ // }
++ #[cfg(procmacro2_semver_exempt)]
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ let mut debug = f.debug_struct("Ident");
++ debug.field("sym", &format_args!("{}", self));
++ debug.field("span", &self.span);
++ debug.finish()
++ }
++}
++
++#[derive(Clone)]
++pub struct Literal {
++ text: String,
++ span: Span,
++}
++
++macro_rules! suffixed_numbers {
++ ($($name:ident => $kind:ident,)*) => ($(
++ pub fn $name(n: $kind) -> Literal {
++ Literal::_new(format!(concat!("{}", stringify!($kind)), n))
++ }
++ )*)
++}
++
++macro_rules! unsuffixed_numbers {
++ ($($name:ident => $kind:ident,)*) => ($(
++ pub fn $name(n: $kind) -> Literal {
++ Literal::_new(n.to_string())
++ }
++ )*)
++}
++
++impl Literal {
++ fn _new(text: String) -> Literal {
++ Literal {
++ text: text,
++ span: Span::call_site(),
++ }
++ }
++
++ suffixed_numbers! {
++ u8_suffixed => u8,
++ u16_suffixed => u16,
++ u32_suffixed => u32,
++ u64_suffixed => u64,
++ usize_suffixed => usize,
++ i8_suffixed => i8,
++ i16_suffixed => i16,
++ i32_suffixed => i32,
++ i64_suffixed => i64,
++ isize_suffixed => isize,
++
++ f32_suffixed => f32,
++ f64_suffixed => f64,
++ }
++
++ #[cfg(u128)]
++ suffixed_numbers! {
++ u128_suffixed => u128,
++ i128_suffixed => i128,
++ }
++
++ unsuffixed_numbers! {
++ u8_unsuffixed => u8,
++ u16_unsuffixed => u16,
++ u32_unsuffixed => u32,
++ u64_unsuffixed => u64,
++ usize_unsuffixed => usize,
++ i8_unsuffixed => i8,
++ i16_unsuffixed => i16,
++ i32_unsuffixed => i32,
++ i64_unsuffixed => i64,
++ isize_unsuffixed => isize,
++ }
++
++ #[cfg(u128)]
++ unsuffixed_numbers! {
++ u128_unsuffixed => u128,
++ i128_unsuffixed => i128,
++ }
++
++ pub fn f32_unsuffixed(f: f32) -> Literal {
++ let mut s = f.to_string();
++ if !s.contains(".") {
++ s.push_str(".0");
++ }
++ Literal::_new(s)
++ }
++
++ pub fn f64_unsuffixed(f: f64) -> Literal {
++ let mut s = f.to_string();
++ if !s.contains(".") {
++ s.push_str(".0");
++ }
++ Literal::_new(s)
++ }
++
++ pub fn string(t: &str) -> Literal {
++ let mut s = t
++ .chars()
++ .flat_map(|c| c.escape_default())
++ .collect::<String>();
++ s.push('"');
++ s.insert(0, '"');
++ Literal::_new(s)
++ }
++
++ pub fn character(t: char) -> Literal {
++ Literal::_new(format!("'{}'", t.escape_default().collect::<String>()))
++ }
++
++ pub fn byte_string(bytes: &[u8]) -> Literal {
++ let mut escaped = "b\"".to_string();
++ for b in bytes {
++ match *b {
++ b'\0' => escaped.push_str(r"\0"),
++ b'\t' => escaped.push_str(r"\t"),
++ b'\n' => escaped.push_str(r"\n"),
++ b'\r' => escaped.push_str(r"\r"),
++ b'"' => escaped.push_str("\\\""),
++ b'\\' => escaped.push_str("\\\\"),
++ b'\x20'...b'\x7E' => escaped.push(*b as char),
++ _ => escaped.push_str(&format!("\\x{:02X}", b)),
++ }
++ }
++ escaped.push('"');
++ Literal::_new(escaped)
++ }
++
++ pub fn span(&self) -> Span {
++ self.span
++ }
++
++ pub fn set_span(&mut self, span: Span) {
++ self.span = span;
++ }
++}
++
++impl fmt::Display for Literal {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.text.fmt(f)
++ }
++}
++
++impl fmt::Debug for Literal {
++ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
++ let mut debug = fmt.debug_struct("Literal");
++ debug.field("lit", &format_args!("{}", self.text));
++ #[cfg(procmacro2_semver_exempt)]
++ debug.field("span", &self.span);
++ debug.finish()
++ }
++}
++
++fn token_stream(mut input: Cursor) -> PResult<TokenStream> {
++ let mut trees = Vec::new();
++ loop {
++ let input_no_ws = skip_whitespace(input);
++ if input_no_ws.rest.len() == 0 {
++ break;
++ }
++ if let Ok((a, tokens)) = doc_comment(input_no_ws) {
++ input = a;
++ trees.extend(tokens);
++ continue;
++ }
++
++ let (a, tt) = match token_tree(input_no_ws) {
++ Ok(p) => p,
++ Err(_) => break,
++ };
++ trees.push(tt);
++ input = a;
++ }
++ Ok((input, TokenStream { inner: trees }))
++}
++
++#[cfg(not(span_locations))]
++fn spanned<'a, T>(
++ input: Cursor<'a>,
++ f: fn(Cursor<'a>) -> PResult<'a, T>,
++) -> PResult<'a, (T, ::Span)> {
++ let (a, b) = f(skip_whitespace(input))?;
++ Ok((a, ((b, ::Span::_new_stable(Span::call_site())))))
++}
++
++#[cfg(span_locations)]
++fn spanned<'a, T>(
++ input: Cursor<'a>,
++ f: fn(Cursor<'a>) -> PResult<'a, T>,
++) -> PResult<'a, (T, ::Span)> {
++ let input = skip_whitespace(input);
++ let lo = input.off;
++ let (a, b) = f(input)?;
++ let hi = a.off;
++ let span = ::Span::_new_stable(Span { lo: lo, hi: hi });
++ Ok((a, (b, span)))
++}
++
++fn token_tree(input: Cursor) -> PResult<TokenTree> {
++ let (rest, (mut tt, span)) = spanned(input, token_kind)?;
++ tt.set_span(span);
++ Ok((rest, tt))
++}
++
++named!(token_kind -> TokenTree, alt!(
++ map!(group, |g| TokenTree::Group(::Group::_new_stable(g)))
++ |
++ map!(literal, |l| TokenTree::Literal(::Literal::_new_stable(l))) // must be before symbol
++ |
++ map!(op, TokenTree::Punct)
++ |
++ symbol_leading_ws
++));
++
++named!(group -> Group, alt!(
++ delimited!(
++ punct!("("),
++ token_stream,
++ punct!(")")
++ ) => { |ts| Group::new(Delimiter::Parenthesis, ts) }
++ |
++ delimited!(
++ punct!("["),
++ token_stream,
++ punct!("]")
++ ) => { |ts| Group::new(Delimiter::Bracket, ts) }
++ |
++ delimited!(
++ punct!("{"),
++ token_stream,
++ punct!("}")
++ ) => { |ts| Group::new(Delimiter::Brace, ts) }
++));
++
++fn symbol_leading_ws(input: Cursor) -> PResult<TokenTree> {
++ symbol(skip_whitespace(input))
++}
++
++fn symbol(input: Cursor) -> PResult<TokenTree> {
++ let mut chars = input.char_indices();
++
++ let raw = input.starts_with("r#");
++ if raw {
++ chars.next();
++ chars.next();
++ }
++
++ match chars.next() {
++ Some((_, ch)) if is_ident_start(ch) => {}
++ _ => return Err(LexError),
++ }
++
++ let mut end = input.len();
++ for (i, ch) in chars {
++ if !is_ident_continue(ch) {
++ end = i;
++ break;
++ }
++ }
++
++ let a = &input.rest[..end];
++ if a == "r#_" {
++ Err(LexError)
++ } else {
++ let ident = if raw {
++ ::Ident::_new_raw(&a[2..], ::Span::call_site())
++ } else {
++ ::Ident::new(a, ::Span::call_site())
++ };
++ Ok((input.advance(end), ident.into()))
++ }
++}
++
++fn literal(input: Cursor) -> PResult<Literal> {
++ let input_no_ws = skip_whitespace(input);
++
++ match literal_nocapture(input_no_ws) {
++ Ok((a, ())) => {
++ let start = input.len() - input_no_ws.len();
++ let len = input_no_ws.len() - a.len();
++ let end = start + len;
++ Ok((a, Literal::_new(input.rest[start..end].to_string())))
++ }
++ Err(LexError) => Err(LexError),
++ }
++}
++
++named!(literal_nocapture -> (), alt!(
++ string
++ |
++ byte_string
++ |
++ byte
++ |
++ character
++ |
++ float
++ |
++ int
++));
++
++named!(string -> (), alt!(
++ quoted_string
++ |
++ preceded!(
++ punct!("r"),
++ raw_string
++ ) => { |_| () }
++));
++
++named!(quoted_string -> (), delimited!(
++ punct!("\""),
++ cooked_string,
++ tag!("\"")
++));
++
++fn cooked_string(input: Cursor) -> PResult<()> {
++ let mut chars = input.char_indices().peekable();
++ while let Some((byte_offset, ch)) = chars.next() {
++ match ch {
++ '"' => {
++ return Ok((input.advance(byte_offset), ()));
++ }
++ '\r' => {
++ if let Some((_, '\n')) = chars.next() {
++ // ...
++ } else {
++ break;
++ }
++ }
++ '\\' => match chars.next() {
++ Some((_, 'x')) => {
++ if !backslash_x_char(&mut chars) {
++ break;
++ }
++ }
++ Some((_, 'n')) | Some((_, 'r')) | Some((_, 't')) | Some((_, '\\'))
++ | Some((_, '\'')) | Some((_, '"')) | Some((_, '0')) => {}
++ Some((_, 'u')) => {
++ if !backslash_u(&mut chars) {
++ break;
++ }
++ }
++ Some((_, '\n')) | Some((_, '\r')) => {
++ while let Some(&(_, ch)) = chars.peek() {
++ if ch.is_whitespace() {
++ chars.next();
++ } else {
++ break;
++ }
++ }
++ }
++ _ => break,
++ },
++ _ch => {}
++ }
++ }
++ Err(LexError)
++}
++
++named!(byte_string -> (), alt!(
++ delimited!(
++ punct!("b\""),
++ cooked_byte_string,
++ tag!("\"")
++ ) => { |_| () }
++ |
++ preceded!(
++ punct!("br"),
++ raw_string
++ ) => { |_| () }
++));
++
++fn cooked_byte_string(mut input: Cursor) -> PResult<()> {
++ let mut bytes = input.bytes().enumerate();
++ 'outer: while let Some((offset, b)) = bytes.next() {
++ match b {
++ b'"' => {
++ return Ok((input.advance(offset), ()));
++ }
++ b'\r' => {
++ if let Some((_, b'\n')) = bytes.next() {
++ // ...
++ } else {
++ break;
++ }
++ }
++ b'\\' => match bytes.next() {
++ Some((_, b'x')) => {
++ if !backslash_x_byte(&mut bytes) {
++ break;
++ }
++ }
++ Some((_, b'n')) | Some((_, b'r')) | Some((_, b't')) | Some((_, b'\\'))
++ | Some((_, b'0')) | Some((_, b'\'')) | Some((_, b'"')) => {}
++ Some((newline, b'\n')) | Some((newline, b'\r')) => {
++ let rest = input.advance(newline + 1);
++ for (offset, ch) in rest.char_indices() {
++ if !ch.is_whitespace() {
++ input = rest.advance(offset);
++ bytes = input.bytes().enumerate();
++ continue 'outer;
++ }
++ }
++ break;
++ }
++ _ => break,
++ },
++ b if b < 0x80 => {}
++ _ => break,
++ }
++ }
++ Err(LexError)
++}
++
++fn raw_string(input: Cursor) -> PResult<()> {
++ let mut chars = input.char_indices();
++ let mut n = 0;
++ while let Some((byte_offset, ch)) = chars.next() {
++ match ch {
++ '"' => {
++ n = byte_offset;
++ break;
++ }
++ '#' => {}
++ _ => return Err(LexError),
++ }
++ }
++ for (byte_offset, ch) in chars {
++ match ch {
++ '"' if input.advance(byte_offset + 1).starts_with(&input.rest[..n]) => {
++ let rest = input.advance(byte_offset + 1 + n);
++ return Ok((rest, ()));
++ }
++ '\r' => {}
++ _ => {}
++ }
++ }
++ Err(LexError)
++}
++
++named!(byte -> (), do_parse!(
++ punct!("b") >>
++ tag!("'") >>
++ cooked_byte >>
++ tag!("'") >>
++ (())
++));
++
++fn cooked_byte(input: Cursor) -> PResult<()> {
++ let mut bytes = input.bytes().enumerate();
++ let ok = match bytes.next().map(|(_, b)| b) {
++ Some(b'\\') => match bytes.next().map(|(_, b)| b) {
++ Some(b'x') => backslash_x_byte(&mut bytes),
++ Some(b'n') | Some(b'r') | Some(b't') | Some(b'\\') | Some(b'0') | Some(b'\'')
++ | Some(b'"') => true,
++ _ => false,
++ },
++ b => b.is_some(),
++ };
++ if ok {
++ match bytes.next() {
++ Some((offset, _)) => {
++ if input.chars().as_str().is_char_boundary(offset) {
++ Ok((input.advance(offset), ()))
++ } else {
++ Err(LexError)
++ }
++ }
++ None => Ok((input.advance(input.len()), ())),
++ }
++ } else {
++ Err(LexError)
++ }
++}
++
++named!(character -> (), do_parse!(
++ punct!("'") >>
++ cooked_char >>
++ tag!("'") >>
++ (())
++));
++
++fn cooked_char(input: Cursor) -> PResult<()> {
++ let mut chars = input.char_indices();
++ let ok = match chars.next().map(|(_, ch)| ch) {
++ Some('\\') => match chars.next().map(|(_, ch)| ch) {
++ Some('x') => backslash_x_char(&mut chars),
++ Some('u') => backslash_u(&mut chars),
++ Some('n') | Some('r') | Some('t') | Some('\\') | Some('0') | Some('\'') | Some('"') => {
++ true
++ }
++ _ => false,
++ },
++ ch => ch.is_some(),
++ };
++ if ok {
++ match chars.next() {
++ Some((idx, _)) => Ok((input.advance(idx), ())),
++ None => Ok((input.advance(input.len()), ())),
++ }
++ } else {
++ Err(LexError)
++ }
++}
++
++macro_rules! next_ch {
++ ($chars:ident @ $pat:pat $(| $rest:pat)*) => {
++ match $chars.next() {
++ Some((_, ch)) => match ch {
++ $pat $(| $rest)* => ch,
++ _ => return false,
++ },
++ None => return false
++ }
++ };
++}
++
++fn backslash_x_char<I>(chars: &mut I) -> bool
++where
++ I: Iterator<Item = (usize, char)>,
++{
++ next_ch!(chars @ '0'...'7');
++ next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F');
++ true
++}
++
++fn backslash_x_byte<I>(chars: &mut I) -> bool
++where
++ I: Iterator<Item = (usize, u8)>,
++{
++ next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F');
++ next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F');
++ true
++}
++
++fn backslash_u<I>(chars: &mut I) -> bool
++where
++ I: Iterator<Item = (usize, char)>,
++{
++ next_ch!(chars @ '{');
++ next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F');
++ loop {
++ let c = next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F' | '_' | '}');
++ if c == '}' {
++ return true;
++ }
++ }
++}
++
++fn float(input: Cursor) -> PResult<()> {
++ let (rest, ()) = float_digits(input)?;
++ for suffix in &["f32", "f64"] {
++ if rest.starts_with(suffix) {
++ return word_break(rest.advance(suffix.len()));
++ }
++ }
++ word_break(rest)
++}
++
++fn float_digits(input: Cursor) -> PResult<()> {
++ let mut chars = input.chars().peekable();
++ match chars.next() {
++ Some(ch) if ch >= '0' && ch <= '9' => {}
++ _ => return Err(LexError),
++ }
++
++ let mut len = 1;
++ let mut has_dot = false;
++ let mut has_exp = false;
++ while let Some(&ch) = chars.peek() {
++ match ch {
++ '0'...'9' | '_' => {
++ chars.next();
++ len += 1;
++ }
++ '.' => {
++ if has_dot {
++ break;
++ }
++ chars.next();
++ if chars
++ .peek()
++ .map(|&ch| ch == '.' || UnicodeXID::is_xid_start(ch))
++ .unwrap_or(false)
++ {
++ return Err(LexError);
++ }
++ len += 1;
++ has_dot = true;
++ }
++ 'e' | 'E' => {
++ chars.next();
++ len += 1;
++ has_exp = true;
++ break;
++ }
++ _ => break,
++ }
++ }
++
++ let rest = input.advance(len);
++ if !(has_dot || has_exp || rest.starts_with("f32") || rest.starts_with("f64")) {
++ return Err(LexError);
++ }
++
++ if has_exp {
++ let mut has_exp_value = false;
++ while let Some(&ch) = chars.peek() {
++ match ch {
++ '+' | '-' => {
++ if has_exp_value {
++ break;
++ }
++ chars.next();
++ len += 1;
++ }
++ '0'...'9' => {
++ chars.next();
++ len += 1;
++ has_exp_value = true;
++ }
++ '_' => {
++ chars.next();
++ len += 1;
++ }
++ _ => break,
++ }
++ }
++ if !has_exp_value {
++ return Err(LexError);
++ }
++ }
++
++ Ok((input.advance(len), ()))
++}
++
++fn int(input: Cursor) -> PResult<()> {
++ let (rest, ()) = digits(input)?;
++ for suffix in &[
++ "isize", "i8", "i16", "i32", "i64", "i128", "usize", "u8", "u16", "u32", "u64", "u128",
++ ] {
++ if rest.starts_with(suffix) {
++ return word_break(rest.advance(suffix.len()));
++ }
++ }
++ word_break(rest)
++}
++
++fn digits(mut input: Cursor) -> PResult<()> {
++ let base = if input.starts_with("0x") {
++ input = input.advance(2);
++ 16
++ } else if input.starts_with("0o") {
++ input = input.advance(2);
++ 8
++ } else if input.starts_with("0b") {
++ input = input.advance(2);
++ 2
++ } else {
++ 10
++ };
++
++ let mut len = 0;
++ let mut empty = true;
++ for b in input.bytes() {
++ let digit = match b {
++ b'0'...b'9' => (b - b'0') as u64,
++ b'a'...b'f' => 10 + (b - b'a') as u64,
++ b'A'...b'F' => 10 + (b - b'A') as u64,
++ b'_' => {
++ if empty && base == 10 {
++ return Err(LexError);
++ }
++ len += 1;
++ continue;
++ }
++ _ => break,
++ };
++ if digit >= base {
++ return Err(LexError);
++ }
++ len += 1;
++ empty = false;
++ }
++ if empty {
++ Err(LexError)
++ } else {
++ Ok((input.advance(len), ()))
++ }
++}
++
++fn op(input: Cursor) -> PResult<Punct> {
++ let input = skip_whitespace(input);
++ match op_char(input) {
++ Ok((rest, '\'')) => {
++ symbol(rest)?;
++ Ok((rest, Punct::new('\'', Spacing::Joint)))
++ }
++ Ok((rest, ch)) => {
++ let kind = match op_char(rest) {
++ Ok(_) => Spacing::Joint,
++ Err(LexError) => Spacing::Alone,
++ };
++ Ok((rest, Punct::new(ch, kind)))
++ }
++ Err(LexError) => Err(LexError),
++ }
++}
++
++fn op_char(input: Cursor) -> PResult<char> {
++ if input.starts_with("//") || input.starts_with("/*") {
++ // Do not accept `/` of a comment as an op.
++ return Err(LexError);
++ }
++
++ let mut chars = input.chars();
++ let first = match chars.next() {
++ Some(ch) => ch,
++ None => {
++ return Err(LexError);
++ }
++ };
++ let recognized = "~!@#$%^&*-=+|;:,<.>/?'";
++ if recognized.contains(first) {
++ Ok((input.advance(first.len_utf8()), first))
++ } else {
++ Err(LexError)
++ }
++}
++
++fn doc_comment(input: Cursor) -> PResult<Vec<TokenTree>> {
++ let mut trees = Vec::new();
++ let (rest, ((comment, inner), span)) = spanned(input, doc_comment_contents)?;
++ trees.push(TokenTree::Punct(Punct::new('#', Spacing::Alone)));
++ if inner {
++ trees.push(Punct::new('!', Spacing::Alone).into());
++ }
++ let mut stream = vec![
++ TokenTree::Ident(::Ident::new("doc", span)),
++ TokenTree::Punct(Punct::new('=', Spacing::Alone)),
++ TokenTree::Literal(::Literal::string(comment)),
++ ];
++ for tt in stream.iter_mut() {
++ tt.set_span(span);
++ }
++ let group = Group::new(Delimiter::Bracket, stream.into_iter().collect());
++ trees.push(::Group::_new_stable(group).into());
++ for tt in trees.iter_mut() {
++ tt.set_span(span);
++ }
++ Ok((rest, trees))
++}
++
++named!(doc_comment_contents -> (&str, bool), alt!(
++ do_parse!(
++ punct!("//!") >>
++ s: take_until_newline_or_eof!() >>
++ ((s, true))
++ )
++ |
++ do_parse!(
++ option!(whitespace) >>
++ peek!(tag!("/*!")) >>
++ s: block_comment >>
++ ((s, true))
++ )
++ |
++ do_parse!(
++ punct!("///") >>
++ not!(tag!("/")) >>
++ s: take_until_newline_or_eof!() >>
++ ((s, false))
++ )
++ |
++ do_parse!(
++ option!(whitespace) >>
++ peek!(tuple!(tag!("/**"), not!(tag!("*")))) >>
++ s: block_comment >>
++ ((s, false))
++ )
++));
+diff --git a/third_party/rust/proc-macro2-0.4.27/src/lib.rs b/third_party/rust/proc-macro2-0.4.27/src/lib.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/src/lib.rs
+@@ -0,0 +1,1149 @@
++//! A wrapper around the procedural macro API of the compiler's [`proc_macro`]
++//! crate. This library serves three purposes:
++//!
++//! [`proc_macro`]: https://doc.rust-lang.org/proc_macro/
++//!
++//! - **Bring proc-macro-like functionality to other contexts like build.rs and
++//! main.rs.** Types from `proc_macro` are entirely specific to procedural
++//! macros and cannot ever exist in code outside of a procedural macro.
++//! Meanwhile `proc_macro2` types may exist anywhere including non-macro code.
++//! By developing foundational libraries like [syn] and [quote] against
++//! `proc_macro2` rather than `proc_macro`, the procedural macro ecosystem
++//! becomes easily applicable to many other use cases and we avoid
++//! reimplementing non-macro equivalents of those libraries.
++//!
++//! - **Make procedural macros unit testable.** As a consequence of being
++//! specific to procedural macros, nothing that uses `proc_macro` can be
++//! executed from a unit test. In order for helper libraries or components of
++//! a macro to be testable in isolation, they must be implemented using
++//! `proc_macro2`.
++//!
++//! - **Provide the latest and greatest APIs across all compiler versions.**
++//! Procedural macros were first introduced to Rust in 1.15.0 with an
++//! extremely minimal interface. Since then, many improvements have landed to
++//! make macros more flexible and easier to write. This library tracks the
++//! procedural macro API of the most recent stable compiler but employs a
++//! polyfill to provide that API consistently across any compiler since
++//! 1.15.0.
++//!
++//! [syn]: https://github.com/dtolnay/syn
++//! [quote]: https://github.com/dtolnay/quote
++//!
++//! # Usage
++//!
++//! The skeleton of a typical procedural macro typically looks like this:
++//!
++//! ```edition2018
++//! extern crate proc_macro;
++//!
++//! # const IGNORE: &str = stringify! {
++//! #[proc_macro_derive(MyDerive)]
++//! # };
++//! pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
++//! let input = proc_macro2::TokenStream::from(input);
++//!
++//! let output: proc_macro2::TokenStream = {
++//! /* transform input */
++//! # input
++//! };
++//!
++//! proc_macro::TokenStream::from(output)
++//! }
++//! ```
++//!
++//! If parsing with [Syn], you'll use [`parse_macro_input!`] instead to
++//! propagate parse errors correctly back to the compiler when parsing fails.
++//!
++//! [`parse_macro_input!`]: https://docs.rs/syn/0.15/syn/macro.parse_macro_input.html
++//!
++//! # Unstable features
++//!
++//! The default feature set of proc-macro2 tracks the most recent stable
++//! compiler API. Functionality in `proc_macro` that is not yet stable is not
++//! exposed by proc-macro2 by default.
++//!
++//! To opt into the additional APIs available in the most recent nightly
++//! compiler, the `procmacro2_semver_exempt` config flag must be passed to
++//! rustc. As usual, we will polyfill those nightly-only APIs all the way back
++//! to Rust 1.15.0. As these are unstable APIs that track the nightly compiler,
++//! minor versions of proc-macro2 may make breaking changes to them at any time.
++//!
++//! ```sh
++//! RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build
++//! ```
++//!
++//! Note that this must not only be done for your crate, but for any crate that
++//! depends on your crate. This infectious nature is intentional, as it serves
++//! as a reminder that you are outside of the normal semver guarantees.
++//!
++//! Semver exempt methods are marked as such in the proc-macro2 documentation.
++
++// Proc-macro2 types in rustdoc of other crates get linked to here.
++#![doc(html_root_url = "https://docs.rs/proc-macro2/0.4.27")]
++#![cfg_attr(nightly, feature(proc_macro_span))]
++#![cfg_attr(super_unstable, feature(proc_macro_raw_ident, proc_macro_def_site))]
++
++#[cfg(use_proc_macro)]
++extern crate proc_macro;
++extern crate unicode_xid;
++
++use std::cmp::Ordering;
++use std::fmt;
++use std::hash::{Hash, Hasher};
++use std::iter::FromIterator;
++use std::marker;
++#[cfg(procmacro2_semver_exempt)]
++use std::path::PathBuf;
++use std::rc::Rc;
++use std::str::FromStr;
++
++#[macro_use]
++mod strnom;
++mod fallback;
++
++#[cfg(not(wrap_proc_macro))]
++use fallback as imp;
++#[path = "wrapper.rs"]
++#[cfg(wrap_proc_macro)]
++mod imp;
++
++/// An abstract stream of tokens, or more concretely a sequence of token trees.
++///
++/// This type provides interfaces for iterating over token trees and for
++/// collecting token trees into one stream.
++///
++/// Token stream is both the input and output of `#[proc_macro]`,
++/// `#[proc_macro_attribute]` and `#[proc_macro_derive]` definitions.
++#[derive(Clone)]
++pub struct TokenStream {
++ inner: imp::TokenStream,
++ _marker: marker::PhantomData<Rc<()>>,
++}
++
++/// Error returned from `TokenStream::from_str`.
++pub struct LexError {
++ inner: imp::LexError,
++ _marker: marker::PhantomData<Rc<()>>,
++}
++
++impl TokenStream {
++ fn _new(inner: imp::TokenStream) -> TokenStream {
++ TokenStream {
++ inner: inner,
++ _marker: marker::PhantomData,
++ }
++ }
++
++ fn _new_stable(inner: fallback::TokenStream) -> TokenStream {
++ TokenStream {
++ inner: inner.into(),
++ _marker: marker::PhantomData,
++ }
++ }
++
++ /// Returns an empty `TokenStream` containing no token trees.
++ pub fn new() -> TokenStream {
++ TokenStream::_new(imp::TokenStream::new())
++ }
++
++ #[deprecated(since = "0.4.4", note = "please use TokenStream::new")]
++ pub fn empty() -> TokenStream {
++ TokenStream::new()
++ }
++
++ /// Checks if this `TokenStream` is empty.
++ pub fn is_empty(&self) -> bool {
++ self.inner.is_empty()
++ }
++}
++
++/// `TokenStream::default()` returns an empty stream,
++/// i.e. this is equivalent with `TokenStream::new()`.
++impl Default for TokenStream {
++ fn default() -> Self {
++ TokenStream::new()
++ }
++}
++
++/// Attempts to break the string into tokens and parse those tokens into a token
++/// stream.
++///
++/// May fail for a number of reasons, for example, if the string contains
++/// unbalanced delimiters or characters not existing in the language.
++///
++/// NOTE: Some errors may cause panics instead of returning `LexError`. We
++/// reserve the right to change these errors into `LexError`s later.
++impl FromStr for TokenStream {
++ type Err = LexError;
++
++ fn from_str(src: &str) -> Result<TokenStream, LexError> {
++ let e = src.parse().map_err(|e| LexError {
++ inner: e,
++ _marker: marker::PhantomData,
++ })?;
++ Ok(TokenStream::_new(e))
++ }
++}
++
++#[cfg(use_proc_macro)]
++impl From<proc_macro::TokenStream> for TokenStream {
++ fn from(inner: proc_macro::TokenStream) -> TokenStream {
++ TokenStream::_new(inner.into())
++ }
++}
++
++#[cfg(use_proc_macro)]
++impl From<TokenStream> for proc_macro::TokenStream {
++ fn from(inner: TokenStream) -> proc_macro::TokenStream {
++ inner.inner.into()
++ }
++}
++
++impl Extend<TokenTree> for TokenStream {
++ fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) {
++ self.inner.extend(streams)
++ }
++}
++
++impl Extend<TokenStream> for TokenStream {
++ fn extend<I: IntoIterator<Item = TokenStream>>(&mut self, streams: I) {
++ self.inner
++ .extend(streams.into_iter().map(|stream| stream.inner))
++ }
++}
++
++/// Collects a number of token trees into a single stream.
++impl FromIterator<TokenTree> for TokenStream {
++ fn from_iter<I: IntoIterator<Item = TokenTree>>(streams: I) -> Self {
++ TokenStream::_new(streams.into_iter().collect())
++ }
++}
++impl FromIterator<TokenStream> for TokenStream {
++ fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self {
++ TokenStream::_new(streams.into_iter().map(|i| i.inner).collect())
++ }
++}
++
++/// Prints the token stream as a string that is supposed to be losslessly
++/// convertible back into the same token stream (modulo spans), except for
++/// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative
++/// numeric literals.
++impl fmt::Display for TokenStream {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.inner.fmt(f)
++ }
++}
++
++/// Prints token in a form convenient for debugging.
++impl fmt::Debug for TokenStream {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.inner.fmt(f)
++ }
++}
++
++impl fmt::Debug for LexError {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.inner.fmt(f)
++ }
++}
++
++/// The source file of a given `Span`.
++///
++/// This type is semver exempt and not exposed by default.
++#[cfg(procmacro2_semver_exempt)]
++#[derive(Clone, PartialEq, Eq)]
++pub struct SourceFile {
++ inner: imp::SourceFile,
++ _marker: marker::PhantomData<Rc<()>>,
++}
++
++#[cfg(procmacro2_semver_exempt)]
++impl SourceFile {
++ fn _new(inner: imp::SourceFile) -> Self {
++ SourceFile {
++ inner: inner,
++ _marker: marker::PhantomData,
++ }
++ }
++
++ /// Get the path to this source file.
++ ///
++ /// ### Note
++ ///
++ /// If the code span associated with this `SourceFile` was generated by an
++ /// external macro, this may not be an actual path on the filesystem. Use
++ /// [`is_real`] to check.
++ ///
++ /// Also note that even if `is_real` returns `true`, if
++ /// `--remap-path-prefix` was passed on the command line, the path as given
++ /// may not actually be valid.
++ ///
++ /// [`is_real`]: #method.is_real
++ pub fn path(&self) -> PathBuf {
++ self.inner.path()
++ }
++
++ /// Returns `true` if this source file is a real source file, and not
++ /// generated by an external macro's expansion.
++ pub fn is_real(&self) -> bool {
++ self.inner.is_real()
++ }
++}
++
++#[cfg(procmacro2_semver_exempt)]
++impl fmt::Debug for SourceFile {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.inner.fmt(f)
++ }
++}
++
++/// A line-column pair representing the start or end of a `Span`.
++///
++/// This type is semver exempt and not exposed by default.
++#[cfg(span_locations)]
++pub struct LineColumn {
++ /// The 1-indexed line in the source file on which the span starts or ends
++ /// (inclusive).
++ pub line: usize,
++ /// The 0-indexed column (in UTF-8 characters) in the source file on which
++ /// the span starts or ends (inclusive).
++ pub column: usize,
++}
++
++/// A region of source code, along with macro expansion information.
++#[derive(Copy, Clone)]
++pub struct Span {
++ inner: imp::Span,
++ _marker: marker::PhantomData<Rc<()>>,
++}
++
++impl Span {
++ fn _new(inner: imp::Span) -> Span {
++ Span {
++ inner: inner,
++ _marker: marker::PhantomData,
++ }
++ }
++
++ fn _new_stable(inner: fallback::Span) -> Span {
++ Span {
++ inner: inner.into(),
++ _marker: marker::PhantomData,
++ }
++ }
++
++ /// The span of the invocation of the current procedural macro.
++ ///
++ /// Identifiers created with this span will be resolved as if they were
++ /// written directly at the macro call location (call-site hygiene) and
++ /// other code at the macro call site will be able to refer to them as well.
++ pub fn call_site() -> Span {
++ Span::_new(imp::Span::call_site())
++ }
++
++ /// A span that resolves at the macro definition site.
++ ///
++ /// This method is semver exempt and not exposed by default.
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn def_site() -> Span {
++ Span::_new(imp::Span::def_site())
++ }
++
++ /// Creates a new span with the same line/column information as `self` but
++ /// that resolves symbols as though it were at `other`.
++ ///
++ /// This method is semver exempt and not exposed by default.
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn resolved_at(&self, other: Span) -> Span {
++ Span::_new(self.inner.resolved_at(other.inner))
++ }
++
++ /// Creates a new span with the same name resolution behavior as `self` but
++ /// with the line/column information of `other`.
++ ///
++ /// This method is semver exempt and not exposed by default.
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn located_at(&self, other: Span) -> Span {
++ Span::_new(self.inner.located_at(other.inner))
++ }
++
++ /// Convert `proc_macro2::Span` to `proc_macro::Span`.
++ ///
++ /// This method is available when building with a nightly compiler, or when
++ /// building with rustc 1.29+ *without* semver exempt features.
++ ///
++ /// # Panics
++ ///
++ /// Panics if called from outside of a procedural macro. Unlike
++ /// `proc_macro2::Span`, the `proc_macro::Span` type can only exist within
++ /// the context of a procedural macro invocation.
++ #[cfg(wrap_proc_macro)]
++ pub fn unwrap(self) -> proc_macro::Span {
++ self.inner.unwrap()
++ }
++
++ // Soft deprecated. Please use Span::unwrap.
++ #[cfg(wrap_proc_macro)]
++ #[doc(hidden)]
++ pub fn unstable(self) -> proc_macro::Span {
++ self.unwrap()
++ }
++
++ /// The original source file into which this span points.
++ ///
++ /// This method is semver exempt and not exposed by default.
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn source_file(&self) -> SourceFile {
++ SourceFile::_new(self.inner.source_file())
++ }
++
++ /// Get the starting line/column in the source file for this span.
++ ///
++ /// This method requires the `"span-locations"` feature to be enabled.
++ #[cfg(span_locations)]
++ pub fn start(&self) -> LineColumn {
++ let imp::LineColumn { line, column } = self.inner.start();
++ LineColumn {
++ line: line,
++ column: column,
++ }
++ }
++
++ /// Get the ending line/column in the source file for this span.
++ ///
++ /// This method requires the `"span-locations"` feature to be enabled.
++ #[cfg(span_locations)]
++ pub fn end(&self) -> LineColumn {
++ let imp::LineColumn { line, column } = self.inner.end();
++ LineColumn {
++ line: line,
++ column: column,
++ }
++ }
++
++ /// Create a new span encompassing `self` and `other`.
++ ///
++ /// Returns `None` if `self` and `other` are from different files.
++ ///
++ /// This method is semver exempt and not exposed by default.
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn join(&self, other: Span) -> Option<Span> {
++ self.inner.join(other.inner).map(Span::_new)
++ }
++
++ /// Compares to spans to see if they're equal.
++ ///
++ /// This method is semver exempt and not exposed by default.
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn eq(&self, other: &Span) -> bool {
++ self.inner.eq(&other.inner)
++ }
++}
++
++/// Prints a span in a form convenient for debugging.
++impl fmt::Debug for Span {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.inner.fmt(f)
++ }
++}
++
++/// A single token or a delimited sequence of token trees (e.g. `[1, (), ..]`).
++#[derive(Clone)]
++pub enum TokenTree {
++ /// A token stream surrounded by bracket delimiters.
++ Group(Group),
++ /// An identifier.
++ Ident(Ident),
++ /// A single punctuation character (`+`, `,`, `$`, etc.).
++ Punct(Punct),
++ /// A literal character (`'a'`), string (`"hello"`), number (`2.3`), etc.
++ Literal(Literal),
++}
++
++impl TokenTree {
++ /// Returns the span of this tree, delegating to the `span` method of
++ /// the contained token or a delimited stream.
++ pub fn span(&self) -> Span {
++ match *self {
++ TokenTree::Group(ref t) => t.span(),
++ TokenTree::Ident(ref t) => t.span(),
++ TokenTree::Punct(ref t) => t.span(),
++ TokenTree::Literal(ref t) => t.span(),
++ }
++ }
++
++ /// Configures the span for *only this token*.
++ ///
++ /// Note that if this token is a `Group` then this method will not configure
++ /// the span of each of the internal tokens, this will simply delegate to
++ /// the `set_span` method of each variant.
++ pub fn set_span(&mut self, span: Span) {
++ match *self {
++ TokenTree::Group(ref mut t) => t.set_span(span),
++ TokenTree::Ident(ref mut t) => t.set_span(span),
++ TokenTree::Punct(ref mut t) => t.set_span(span),
++ TokenTree::Literal(ref mut t) => t.set_span(span),
++ }
++ }
++}
++
++impl From<Group> for TokenTree {
++ fn from(g: Group) -> TokenTree {
++ TokenTree::Group(g)
++ }
++}
++
++impl From<Ident> for TokenTree {
++ fn from(g: Ident) -> TokenTree {
++ TokenTree::Ident(g)
++ }
++}
++
++impl From<Punct> for TokenTree {
++ fn from(g: Punct) -> TokenTree {
++ TokenTree::Punct(g)
++ }
++}
++
++impl From<Literal> for TokenTree {
++ fn from(g: Literal) -> TokenTree {
++ TokenTree::Literal(g)
++ }
++}
++
++/// Prints the token tree as a string that is supposed to be losslessly
++/// convertible back into the same token tree (modulo spans), except for
++/// possibly `TokenTree::Group`s with `Delimiter::None` delimiters and negative
++/// numeric literals.
++impl fmt::Display for TokenTree {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ match *self {
++ TokenTree::Group(ref t) => t.fmt(f),
++ TokenTree::Ident(ref t) => t.fmt(f),
++ TokenTree::Punct(ref t) => t.fmt(f),
++ TokenTree::Literal(ref t) => t.fmt(f),
++ }
++ }
++}
++
++/// Prints token tree in a form convenient for debugging.
++impl fmt::Debug for TokenTree {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ // Each of these has the name in the struct type in the derived debug,
++ // so don't bother with an extra layer of indirection
++ match *self {
++ TokenTree::Group(ref t) => t.fmt(f),
++ TokenTree::Ident(ref t) => {
++ let mut debug = f.debug_struct("Ident");
++ debug.field("sym", &format_args!("{}", t));
++ imp::debug_span_field_if_nontrivial(&mut debug, t.span().inner);
++ debug.finish()
++ }
++ TokenTree::Punct(ref t) => t.fmt(f),
++ TokenTree::Literal(ref t) => t.fmt(f),
++ }
++ }
++}
++
++/// A delimited token stream.
++///
++/// A `Group` internally contains a `TokenStream` which is surrounded by
++/// `Delimiter`s.
++#[derive(Clone)]
++pub struct Group {
++ inner: imp::Group,
++}
++
++/// Describes how a sequence of token trees is delimited.
++#[derive(Copy, Clone, Debug, Eq, PartialEq)]
++pub enum Delimiter {
++ /// `( ... )`
++ Parenthesis,
++ /// `{ ... }`
++ Brace,
++ /// `[ ... ]`
++ Bracket,
++ /// `Ø ... Ø`
++ ///
++ /// An implicit delimiter, that may, for example, appear around tokens
++ /// coming from a "macro variable" `$var`. It is important to preserve
++ /// operator priorities in cases like `$var * 3` where `$var` is `1 + 2`.
++ /// Implicit delimiters may not survive roundtrip of a token stream through
++ /// a string.
++ None,
++}
++
++impl Group {
++ fn _new(inner: imp::Group) -> Self {
++ Group { inner: inner }
++ }
++
++ fn _new_stable(inner: fallback::Group) -> Self {
++ Group {
++ inner: inner.into(),
++ }
++ }
++
++ /// Creates a new `Group` with the given delimiter and token stream.
++ ///
++ /// This constructor will set the span for this group to
++ /// `Span::call_site()`. To change the span you can use the `set_span`
++ /// method below.
++ pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group {
++ Group {
++ inner: imp::Group::new(delimiter, stream.inner),
++ }
++ }
++
++ /// Returns the delimiter of this `Group`
++ pub fn delimiter(&self) -> Delimiter {
++ self.inner.delimiter()
++ }
++
++ /// Returns the `TokenStream` of tokens that are delimited in this `Group`.
++ ///
++ /// Note that the returned token stream does not include the delimiter
++ /// returned above.
++ pub fn stream(&self) -> TokenStream {
++ TokenStream::_new(self.inner.stream())
++ }
++
++ /// Returns the span for the delimiters of this token stream, spanning the
++ /// entire `Group`.
++ ///
++ /// ```text
++ /// pub fn span(&self) -> Span {
++ /// ^^^^^^^
++ /// ```
++ pub fn span(&self) -> Span {
++ Span::_new(self.inner.span())
++ }
++
++ /// Returns the span pointing to the opening delimiter of this group.
++ ///
++ /// ```text
++ /// pub fn span_open(&self) -> Span {
++ /// ^
++ /// ```
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn span_open(&self) -> Span {
++ Span::_new(self.inner.span_open())
++ }
++
++ /// Returns the span pointing to the closing delimiter of this group.
++ ///
++ /// ```text
++ /// pub fn span_close(&self) -> Span {
++ /// ^
++ /// ```
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn span_close(&self) -> Span {
++ Span::_new(self.inner.span_close())
++ }
++
++ /// Configures the span for this `Group`'s delimiters, but not its internal
++ /// tokens.
++ ///
++ /// This method will **not** set the span of all the internal tokens spanned
++ /// by this group, but rather it will only set the span of the delimiter
++ /// tokens at the level of the `Group`.
++ pub fn set_span(&mut self, span: Span) {
++ self.inner.set_span(span.inner)
++ }
++}
++
++/// Prints the group as a string that should be losslessly convertible back
++/// into the same group (modulo spans), except for possibly `TokenTree::Group`s
++/// with `Delimiter::None` delimiters.
++impl fmt::Display for Group {
++ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
++ fmt::Display::fmt(&self.inner, formatter)
++ }
++}
++
++impl fmt::Debug for Group {
++ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
++ fmt::Debug::fmt(&self.inner, formatter)
++ }
++}
++
++/// An `Punct` is an single punctuation character like `+`, `-` or `#`.
++///
++/// Multicharacter operators like `+=` are represented as two instances of
++/// `Punct` with different forms of `Spacing` returned.
++#[derive(Clone)]
++pub struct Punct {
++ op: char,
++ spacing: Spacing,
++ span: Span,
++}
++
++/// Whether an `Punct` is followed immediately by another `Punct` or followed by
++/// another token or whitespace.
++#[derive(Copy, Clone, Debug, Eq, PartialEq)]
++pub enum Spacing {
++ /// E.g. `+` is `Alone` in `+ =`, `+ident` or `+()`.
++ Alone,
++ /// E.g. `+` is `Joint` in `+=` or `'#`.
++ ///
++ /// Additionally, single quote `'` can join with identifiers to form
++ /// lifetimes `'ident`.
++ Joint,
++}
++
++impl Punct {
++ /// Creates a new `Punct` from the given character and spacing.
++ ///
++ /// The `ch` argument must be a valid punctuation character permitted by the
++ /// language, otherwise the function will panic.
++ ///
++ /// The returned `Punct` will have the default span of `Span::call_site()`
++ /// which can be further configured with the `set_span` method below.
++ pub fn new(op: char, spacing: Spacing) -> Punct {
++ Punct {
++ op: op,
++ spacing: spacing,
++ span: Span::call_site(),
++ }
++ }
++
++ /// Returns the value of this punctuation character as `char`.
++ pub fn as_char(&self) -> char {
++ self.op
++ }
++
++ /// Returns the spacing of this punctuation character, indicating whether
++ /// it's immediately followed by another `Punct` in the token stream, so
++ /// they can potentially be combined into a multicharacter operator
++ /// (`Joint`), or it's followed by some other token or whitespace (`Alone`)
++ /// so the operator has certainly ended.
++ pub fn spacing(&self) -> Spacing {
++ self.spacing
++ }
++
++ /// Returns the span for this punctuation character.
++ pub fn span(&self) -> Span {
++ self.span
++ }
++
++ /// Configure the span for this punctuation character.
++ pub fn set_span(&mut self, span: Span) {
++ self.span = span;
++ }
++}
++
++/// Prints the punctuation character as a string that should be losslessly
++/// convertible back into the same character.
++impl fmt::Display for Punct {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.op.fmt(f)
++ }
++}
++
++impl fmt::Debug for Punct {
++ fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
++ let mut debug = fmt.debug_struct("Punct");
++ debug.field("op", &self.op);
++ debug.field("spacing", &self.spacing);
++ imp::debug_span_field_if_nontrivial(&mut debug, self.span.inner);
++ debug.finish()
++ }
++}
++
++/// A word of Rust code, which may be a keyword or legal variable name.
++///
++/// An identifier consists of at least one Unicode code point, the first of
++/// which has the XID_Start property and the rest of which have the XID_Continue
++/// property.
++///
++/// - The empty string is not an identifier. Use `Option<Ident>`.
++/// - A lifetime is not an identifier. Use `syn::Lifetime` instead.
++///
++/// An identifier constructed with `Ident::new` is permitted to be a Rust
++/// keyword, though parsing one through its [`Parse`] implementation rejects
++/// Rust keywords. Use `input.call(Ident::parse_any)` when parsing to match the
++/// behaviour of `Ident::new`.
++///
++/// [`Parse`]: https://docs.rs/syn/0.15/syn/parse/trait.Parse.html
++///
++/// # Examples
++///
++/// A new ident can be created from a string using the `Ident::new` function.
++/// A span must be provided explicitly which governs the name resolution
++/// behavior of the resulting identifier.
++///
++/// ```edition2018
++/// use proc_macro2::{Ident, Span};
++///
++/// fn main() {
++/// let call_ident = Ident::new("calligraphy", Span::call_site());
++///
++/// println!("{}", call_ident);
++/// }
++/// ```
++///
++/// An ident can be interpolated into a token stream using the `quote!` macro.
++///
++/// ```edition2018
++/// use proc_macro2::{Ident, Span};
++/// use quote::quote;
++///
++/// fn main() {
++/// let ident = Ident::new("demo", Span::call_site());
++///
++/// // Create a variable binding whose name is this ident.
++/// let expanded = quote! { let #ident = 10; };
++///
++/// // Create a variable binding with a slightly different name.
++/// let temp_ident = Ident::new(&format!("new_{}", ident), Span::call_site());
++/// let expanded = quote! { let #temp_ident = 10; };
++/// }
++/// ```
++///
++/// A string representation of the ident is available through the `to_string()`
++/// method.
++///
++/// ```edition2018
++/// # use proc_macro2::{Ident, Span};
++/// #
++/// # let ident = Ident::new("another_identifier", Span::call_site());
++/// #
++/// // Examine the ident as a string.
++/// let ident_string = ident.to_string();
++/// if ident_string.len() > 60 {
++/// println!("Very long identifier: {}", ident_string)
++/// }
++/// ```
++#[derive(Clone)]
++pub struct Ident {
++ inner: imp::Ident,
++ _marker: marker::PhantomData<Rc<()>>,
++}
++
++impl Ident {
++ fn _new(inner: imp::Ident) -> Ident {
++ Ident {
++ inner: inner,
++ _marker: marker::PhantomData,
++ }
++ }
++
++ /// Creates a new `Ident` with the given `string` as well as the specified
++ /// `span`.
++ ///
++ /// The `string` argument must be a valid identifier permitted by the
++ /// language, otherwise the function will panic.
++ ///
++ /// Note that `span`, currently in rustc, configures the hygiene information
++ /// for this identifier.
++ ///
++ /// As of this time `Span::call_site()` explicitly opts-in to "call-site"
++ /// hygiene meaning that identifiers created with this span will be resolved
++ /// as if they were written directly at the location of the macro call, and
++ /// other code at the macro call site will be able to refer to them as well.
++ ///
++ /// Later spans like `Span::def_site()` will allow to opt-in to
++ /// "definition-site" hygiene meaning that identifiers created with this
++ /// span will be resolved at the location of the macro definition and other
++ /// code at the macro call site will not be able to refer to them.
++ ///
++ /// Due to the current importance of hygiene this constructor, unlike other
++ /// tokens, requires a `Span` to be specified at construction.
++ ///
++ /// # Panics
++ ///
++ /// Panics if the input string is neither a keyword nor a legal variable
++ /// name.
++ pub fn new(string: &str, span: Span) -> Ident {
++ Ident::_new(imp::Ident::new(string, span.inner))
++ }
++
++ /// Same as `Ident::new`, but creates a raw identifier (`r#ident`).
++ ///
++ /// This method is semver exempt and not exposed by default.
++ #[cfg(procmacro2_semver_exempt)]
++ pub fn new_raw(string: &str, span: Span) -> Ident {
++ Ident::_new_raw(string, span)
++ }
++
++ fn _new_raw(string: &str, span: Span) -> Ident {
++ Ident::_new(imp::Ident::new_raw(string, span.inner))
++ }
++
++ /// Returns the span of this `Ident`.
++ pub fn span(&self) -> Span {
++ Span::_new(self.inner.span())
++ }
++
++ /// Configures the span of this `Ident`, possibly changing its hygiene
++ /// context.
++ pub fn set_span(&mut self, span: Span) {
++ self.inner.set_span(span.inner);
++ }
++}
++
++impl PartialEq for Ident {
++ fn eq(&self, other: &Ident) -> bool {
++ self.inner == other.inner
++ }
++}
++
++impl<T> PartialEq<T> for Ident
++where
++ T: ?Sized + AsRef<str>,
++{
++ fn eq(&self, other: &T) -> bool {
++ self.inner == other
++ }
++}
++
++impl Eq for Ident {}
++
++impl PartialOrd for Ident {
++ fn partial_cmp(&self, other: &Ident) -> Option<Ordering> {
++ Some(self.cmp(other))
++ }
++}
++
++impl Ord for Ident {
++ fn cmp(&self, other: &Ident) -> Ordering {
++ self.to_string().cmp(&other.to_string())
++ }
++}
++
++impl Hash for Ident {
++ fn hash<H: Hasher>(&self, hasher: &mut H) {
++ self.to_string().hash(hasher)
++ }
++}
++
++/// Prints the identifier as a string that should be losslessly convertible back
++/// into the same identifier.
++impl fmt::Display for Ident {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.inner.fmt(f)
++ }
++}
++
++impl fmt::Debug for Ident {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.inner.fmt(f)
++ }
++}
++
++/// A literal string (`"hello"`), byte string (`b"hello"`), character (`'a'`),
++/// byte character (`b'a'`), an integer or floating point number with or without
++/// a suffix (`1`, `1u8`, `2.3`, `2.3f32`).
++///
++/// Boolean literals like `true` and `false` do not belong here, they are
++/// `Ident`s.
++#[derive(Clone)]
++pub struct Literal {
++ inner: imp::Literal,
++ _marker: marker::PhantomData<Rc<()>>,
++}
++
++macro_rules! suffixed_int_literals {
++ ($($name:ident => $kind:ident,)*) => ($(
++ /// Creates a new suffixed integer literal with the specified value.
++ ///
++ /// This function will create an integer like `1u32` where the integer
++ /// value specified is the first part of the token and the integral is
++ /// also suffixed at the end. Literals created from negative numbers may
++ /// not survive rountrips through `TokenStream` or strings and may be
++ /// broken into two tokens (`-` and positive literal).
++ ///
++ /// Literals created through this method have the `Span::call_site()`
++ /// span by default, which can be configured with the `set_span` method
++ /// below.
++ pub fn $name(n: $kind) -> Literal {
++ Literal::_new(imp::Literal::$name(n))
++ }
++ )*)
++}
++
++macro_rules! unsuffixed_int_literals {
++ ($($name:ident => $kind:ident,)*) => ($(
++ /// Creates a new unsuffixed integer literal with the specified value.
++ ///
++ /// This function will create an integer like `1` where the integer
++ /// value specified is the first part of the token. No suffix is
++ /// specified on this token, meaning that invocations like
++ /// `Literal::i8_unsuffixed(1)` are equivalent to
++ /// `Literal::u32_unsuffixed(1)`. Literals created from negative numbers
++ /// may not survive rountrips through `TokenStream` or strings and may
++ /// be broken into two tokens (`-` and positive literal).
++ ///
++ /// Literals created through this method have the `Span::call_site()`
++ /// span by default, which can be configured with the `set_span` method
++ /// below.
++ pub fn $name(n: $kind) -> Literal {
++ Literal::_new(imp::Literal::$name(n))
++ }
++ )*)
++}
++
++impl Literal {
++ fn _new(inner: imp::Literal) -> Literal {
++ Literal {
++ inner: inner,
++ _marker: marker::PhantomData,
++ }
++ }
++
++ fn _new_stable(inner: fallback::Literal) -> Literal {
++ Literal {
++ inner: inner.into(),
++ _marker: marker::PhantomData,
++ }
++ }
++
++ suffixed_int_literals! {
++ u8_suffixed => u8,
++ u16_suffixed => u16,
++ u32_suffixed => u32,
++ u64_suffixed => u64,
++ usize_suffixed => usize,
++ i8_suffixed => i8,
++ i16_suffixed => i16,
++ i32_suffixed => i32,
++ i64_suffixed => i64,
++ isize_suffixed => isize,
++ }
++
++ #[cfg(u128)]
++ suffixed_int_literals! {
++ u128_suffixed => u128,
++ i128_suffixed => i128,
++ }
++
++ unsuffixed_int_literals! {
++ u8_unsuffixed => u8,
++ u16_unsuffixed => u16,
++ u32_unsuffixed => u32,
++ u64_unsuffixed => u64,
++ usize_unsuffixed => usize,
++ i8_unsuffixed => i8,
++ i16_unsuffixed => i16,
++ i32_unsuffixed => i32,
++ i64_unsuffixed => i64,
++ isize_unsuffixed => isize,
++ }
++
++ #[cfg(u128)]
++ unsuffixed_int_literals! {
++ u128_unsuffixed => u128,
++ i128_unsuffixed => i128,
++ }
++
++ pub fn f64_unsuffixed(f: f64) -> Literal {
++ assert!(f.is_finite());
++ Literal::_new(imp::Literal::f64_unsuffixed(f))
++ }
++
++ pub fn f64_suffixed(f: f64) -> Literal {
++ assert!(f.is_finite());
++ Literal::_new(imp::Literal::f64_suffixed(f))
++ }
++
++ /// Creates a new unsuffixed floating-point literal.
++ ///
++ /// This constructor is similar to those like `Literal::i8_unsuffixed` where
++ /// the float's value is emitted directly into the token but no suffix is
++ /// used, so it may be inferred to be a `f64` later in the compiler.
++ /// Literals created from negative numbers may not survive rountrips through
++ /// `TokenStream` or strings and may be broken into two tokens (`-` and
++ /// positive literal).
++ ///
++ /// # Panics
++ ///
++ /// This function requires that the specified float is finite, for example
++ /// if it is infinity or NaN this function will panic.
++ pub fn f32_unsuffixed(f: f32) -> Literal {
++ assert!(f.is_finite());
++ Literal::_new(imp::Literal::f32_unsuffixed(f))
++ }
++
++ pub fn f32_suffixed(f: f32) -> Literal {
++ assert!(f.is_finite());
++ Literal::_new(imp::Literal::f32_suffixed(f))
++ }
++
++ pub fn string(string: &str) -> Literal {
++ Literal::_new(imp::Literal::string(string))
++ }
++
++ pub fn character(ch: char) -> Literal {
++ Literal::_new(imp::Literal::character(ch))
++ }
++
++ pub fn byte_string(s: &[u8]) -> Literal {
++ Literal::_new(imp::Literal::byte_string(s))
++ }
++
++ pub fn span(&self) -> Span {
++ Span::_new(self.inner.span())
++ }
++
++ pub fn set_span(&mut self, span: Span) {
++ self.inner.set_span(span.inner);
++ }
++}
++
++impl fmt::Debug for Literal {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.inner.fmt(f)
++ }
++}
++
++impl fmt::Display for Literal {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.inner.fmt(f)
++ }
++}
++
++/// Public implementation details for the `TokenStream` type, such as iterators.
++pub mod token_stream {
++ use std::fmt;
++ use std::marker;
++ use std::rc::Rc;
++
++ use imp;
++ pub use TokenStream;
++ use TokenTree;
++
++ /// An iterator over `TokenStream`'s `TokenTree`s.
++ ///
++ /// The iteration is "shallow", e.g. the iterator doesn't recurse into
++ /// delimited groups, and returns whole groups as token trees.
++ pub struct IntoIter {
++ inner: imp::TokenTreeIter,
++ _marker: marker::PhantomData<Rc<()>>,
++ }
++
++ impl Iterator for IntoIter {
++ type Item = TokenTree;
++
++ fn next(&mut self) -> Option<TokenTree> {
++ self.inner.next()
++ }
++ }
++
++ impl fmt::Debug for IntoIter {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ self.inner.fmt(f)
++ }
++ }
++
++ impl IntoIterator for TokenStream {
++ type Item = TokenTree;
++ type IntoIter = IntoIter;
++
++ fn into_iter(self) -> IntoIter {
++ IntoIter {
++ inner: self.inner.into_iter(),
++ _marker: marker::PhantomData,
++ }
++ }
++ }
++}
+diff --git a/third_party/rust/proc-macro2-0.4.27/src/strnom.rs b/third_party/rust/proc-macro2-0.4.27/src/strnom.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/src/strnom.rs
+@@ -0,0 +1,393 @@
++//! Adapted from [`nom`](https://github.com/Geal/nom).
++
++use std::str::{Bytes, CharIndices, Chars};
++
++use unicode_xid::UnicodeXID;
++
++use fallback::LexError;
++
++#[derive(Copy, Clone, Eq, PartialEq)]
++pub struct Cursor<'a> {
++ pub rest: &'a str,
++ #[cfg(span_locations)]
++ pub off: u32,
++}
++
++impl<'a> Cursor<'a> {
++ #[cfg(not(span_locations))]
++ pub fn advance(&self, amt: usize) -> Cursor<'a> {
++ Cursor {
++ rest: &self.rest[amt..],
++ }
++ }
++ #[cfg(span_locations)]
++ pub fn advance(&self, amt: usize) -> Cursor<'a> {
++ Cursor {
++ rest: &self.rest[amt..],
++ off: self.off + (amt as u32),
++ }
++ }
++
++ pub fn find(&self, p: char) -> Option<usize> {
++ self.rest.find(p)
++ }
++
++ pub fn starts_with(&self, s: &str) -> bool {
++ self.rest.starts_with(s)
++ }
++
++ pub fn is_empty(&self) -> bool {
++ self.rest.is_empty()
++ }
++
++ pub fn len(&self) -> usize {
++ self.rest.len()
++ }
++
++ pub fn as_bytes(&self) -> &'a [u8] {
++ self.rest.as_bytes()
++ }
++
++ pub fn bytes(&self) -> Bytes<'a> {
++ self.rest.bytes()
++ }
++
++ pub fn chars(&self) -> Chars<'a> {
++ self.rest.chars()
++ }
++
++ pub fn char_indices(&self) -> CharIndices<'a> {
++ self.rest.char_indices()
++ }
++}
++
++pub type PResult<'a, O> = Result<(Cursor<'a>, O), LexError>;
++
++pub fn whitespace(input: Cursor) -> PResult<()> {
++ if input.is_empty() {
++ return Err(LexError);
++ }
++
++ let bytes = input.as_bytes();
++ let mut i = 0;
++ while i < bytes.len() {
++ let s = input.advance(i);
++ if bytes[i] == b'/' {
++ if s.starts_with("//")
++ && (!s.starts_with("///") || s.starts_with("////"))
++ && !s.starts_with("//!")
++ {
++ if let Some(len) = s.find('\n') {
++ i += len + 1;
++ continue;
++ }
++ break;
++ } else if s.starts_with("/**/") {
++ i += 4;
++ continue;
++ } else if s.starts_with("/*")
++ && (!s.starts_with("/**") || s.starts_with("/***"))
++ && !s.starts_with("/*!")
++ {
++ let (_, com) = block_comment(s)?;
++ i += com.len();
++ continue;
++ }
++ }
++ match bytes[i] {
++ b' ' | 0x09...0x0d => {
++ i += 1;
++ continue;
++ }
++ b if b <= 0x7f => {}
++ _ => {
++ let ch = s.chars().next().unwrap();
++ if is_whitespace(ch) {
++ i += ch.len_utf8();
++ continue;
++ }
++ }
++ }
++ return if i > 0 { Ok((s, ())) } else { Err(LexError) };
++ }
++ Ok((input.advance(input.len()), ()))
++}
++
++pub fn block_comment(input: Cursor) -> PResult<&str> {
++ if !input.starts_with("/*") {
++ return Err(LexError);
++ }
++
++ let mut depth = 0;
++ let bytes = input.as_bytes();
++ let mut i = 0;
++ let upper = bytes.len() - 1;
++ while i < upper {
++ if bytes[i] == b'/' && bytes[i + 1] == b'*' {
++ depth += 1;
++ i += 1; // eat '*'
++ } else if bytes[i] == b'*' && bytes[i + 1] == b'/' {
++ depth -= 1;
++ if depth == 0 {
++ return Ok((input.advance(i + 2), &input.rest[..i + 2]));
++ }
++ i += 1; // eat '/'
++ }
++ i += 1;
++ }
++ Err(LexError)
++}
++
++pub fn skip_whitespace(input: Cursor) -> Cursor {
++ match whitespace(input) {
++ Ok((rest, _)) => rest,
++ Err(LexError) => input,
++ }
++}
++
++fn is_whitespace(ch: char) -> bool {
++ // Rust treats left-to-right mark and right-to-left mark as whitespace
++ ch.is_whitespace() || ch == '\u{200e}' || ch == '\u{200f}'
++}
++
++pub fn word_break(input: Cursor) -> PResult<()> {
++ match input.chars().next() {
++ Some(ch) if UnicodeXID::is_xid_continue(ch) => Err(LexError),
++ Some(_) | None => Ok((input, ())),
++ }
++}
++
++macro_rules! named {
++ ($name:ident -> $o:ty, $submac:ident!( $($args:tt)* )) => {
++ fn $name<'a>(i: Cursor<'a>) -> $crate::strnom::PResult<'a, $o> {
++ $submac!(i, $($args)*)
++ }
++ };
++}
++
++macro_rules! alt {
++ ($i:expr, $e:ident | $($rest:tt)*) => {
++ alt!($i, call!($e) | $($rest)*)
++ };
++
++ ($i:expr, $subrule:ident!( $($args:tt)*) | $($rest:tt)*) => {
++ match $subrule!($i, $($args)*) {
++ res @ Ok(_) => res,
++ _ => alt!($i, $($rest)*)
++ }
++ };
++
++ ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr } | $($rest:tt)+) => {
++ match $subrule!($i, $($args)*) {
++ Ok((i, o)) => Ok((i, $gen(o))),
++ Err(LexError) => alt!($i, $($rest)*)
++ }
++ };
++
++ ($i:expr, $e:ident => { $gen:expr } | $($rest:tt)*) => {
++ alt!($i, call!($e) => { $gen } | $($rest)*)
++ };
++
++ ($i:expr, $e:ident => { $gen:expr }) => {
++ alt!($i, call!($e) => { $gen })
++ };
++
++ ($i:expr, $subrule:ident!( $($args:tt)* ) => { $gen:expr }) => {
++ match $subrule!($i, $($args)*) {
++ Ok((i, o)) => Ok((i, $gen(o))),
++ Err(LexError) => Err(LexError),
++ }
++ };
++
++ ($i:expr, $e:ident) => {
++ alt!($i, call!($e))
++ };
++
++ ($i:expr, $subrule:ident!( $($args:tt)*)) => {
++ $subrule!($i, $($args)*)
++ };
++}
++
++macro_rules! do_parse {
++ ($i:expr, ( $($rest:expr),* )) => {
++ Ok(($i, ( $($rest),* )))
++ };
++
++ ($i:expr, $e:ident >> $($rest:tt)*) => {
++ do_parse!($i, call!($e) >> $($rest)*)
++ };
++
++ ($i:expr, $submac:ident!( $($args:tt)* ) >> $($rest:tt)*) => {
++ match $submac!($i, $($args)*) {
++ Err(LexError) => Err(LexError),
++ Ok((i, _)) => do_parse!(i, $($rest)*),
++ }
++ };
++
++ ($i:expr, $field:ident : $e:ident >> $($rest:tt)*) => {
++ do_parse!($i, $field: call!($e) >> $($rest)*)
++ };
++
++ ($i:expr, $field:ident : $submac:ident!( $($args:tt)* ) >> $($rest:tt)*) => {
++ match $submac!($i, $($args)*) {
++ Err(LexError) => Err(LexError),
++ Ok((i, o)) => {
++ let $field = o;
++ do_parse!(i, $($rest)*)
++ },
++ }
++ };
++}
++
++macro_rules! peek {
++ ($i:expr, $submac:ident!( $($args:tt)* )) => {
++ match $submac!($i, $($args)*) {
++ Ok((_, o)) => Ok(($i, o)),
++ Err(LexError) => Err(LexError),
++ }
++ };
++}
++
++macro_rules! call {
++ ($i:expr, $fun:expr $(, $args:expr)*) => {
++ $fun($i $(, $args)*)
++ };
++}
++
++macro_rules! option {
++ ($i:expr, $f:expr) => {
++ match $f($i) {
++ Ok((i, o)) => Ok((i, Some(o))),
++ Err(LexError) => Ok(($i, None)),
++ }
++ };
++}
++
++macro_rules! take_until_newline_or_eof {
++ ($i:expr,) => {{
++ if $i.len() == 0 {
++ Ok(($i, ""))
++ } else {
++ match $i.find('\n') {
++ Some(i) => Ok(($i.advance(i), &$i.rest[..i])),
++ None => Ok(($i.advance($i.len()), &$i.rest[..$i.len()])),
++ }
++ }
++ }};
++}
++
++macro_rules! tuple {
++ ($i:expr, $($rest:tt)*) => {
++ tuple_parser!($i, (), $($rest)*)
++ };
++}
++
++/// Do not use directly. Use `tuple!`.
++macro_rules! tuple_parser {
++ ($i:expr, ($($parsed:tt),*), $e:ident, $($rest:tt)*) => {
++ tuple_parser!($i, ($($parsed),*), call!($e), $($rest)*)
++ };
++
++ ($i:expr, (), $submac:ident!( $($args:tt)* ), $($rest:tt)*) => {
++ match $submac!($i, $($args)*) {
++ Err(LexError) => Err(LexError),
++ Ok((i, o)) => tuple_parser!(i, (o), $($rest)*),
++ }
++ };
++
++ ($i:expr, ($($parsed:tt)*), $submac:ident!( $($args:tt)* ), $($rest:tt)*) => {
++ match $submac!($i, $($args)*) {
++ Err(LexError) => Err(LexError),
++ Ok((i, o)) => tuple_parser!(i, ($($parsed)* , o), $($rest)*),
++ }
++ };
++
++ ($i:expr, ($($parsed:tt),*), $e:ident) => {
++ tuple_parser!($i, ($($parsed),*), call!($e))
++ };
++
++ ($i:expr, (), $submac:ident!( $($args:tt)* )) => {
++ $submac!($i, $($args)*)
++ };
++
++ ($i:expr, ($($parsed:expr),*), $submac:ident!( $($args:tt)* )) => {
++ match $submac!($i, $($args)*) {
++ Err(LexError) => Err(LexError),
++ Ok((i, o)) => Ok((i, ($($parsed),*, o)))
++ }
++ };
++
++ ($i:expr, ($($parsed:expr),*)) => {
++ Ok(($i, ($($parsed),*)))
++ };
++}
++
++macro_rules! not {
++ ($i:expr, $submac:ident!( $($args:tt)* )) => {
++ match $submac!($i, $($args)*) {
++ Ok((_, _)) => Err(LexError),
++ Err(LexError) => Ok(($i, ())),
++ }
++ };
++}
++
++macro_rules! tag {
++ ($i:expr, $tag:expr) => {
++ if $i.starts_with($tag) {
++ Ok(($i.advance($tag.len()), &$i.rest[..$tag.len()]))
++ } else {
++ Err(LexError)
++ }
++ };
++}
++
++macro_rules! punct {
++ ($i:expr, $punct:expr) => {
++ $crate::strnom::punct($i, $punct)
++ };
++}
++
++/// Do not use directly. Use `punct!`.
++pub fn punct<'a>(input: Cursor<'a>, token: &'static str) -> PResult<'a, &'a str> {
++ let input = skip_whitespace(input);
++ if input.starts_with(token) {
++ Ok((input.advance(token.len()), token))
++ } else {
++ Err(LexError)
++ }
++}
++
++macro_rules! preceded {
++ ($i:expr, $submac:ident!( $($args:tt)* ), $submac2:ident!( $($args2:tt)* )) => {
++ match tuple!($i, $submac!($($args)*), $submac2!($($args2)*)) {
++ Ok((remaining, (_, o))) => Ok((remaining, o)),
++ Err(LexError) => Err(LexError),
++ }
++ };
++
++ ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => {
++ preceded!($i, $submac!($($args)*), call!($g))
++ };
++}
++
++macro_rules! delimited {
++ ($i:expr, $submac:ident!( $($args:tt)* ), $($rest:tt)+) => {
++ match tuple_parser!($i, (), $submac!($($args)*), $($rest)*) {
++ Err(LexError) => Err(LexError),
++ Ok((i1, (_, o, _))) => Ok((i1, o))
++ }
++ };
++}
++
++macro_rules! map {
++ ($i:expr, $submac:ident!( $($args:tt)* ), $g:expr) => {
++ match $submac!($i, $($args)*) {
++ Err(LexError) => Err(LexError),
++ Ok((i, o)) => Ok((i, call!(o, $g)))
++ }
++ };
++
++ ($i:expr, $f:expr, $g:expr) => {
++ map!($i, call!($f), $g)
++ };
++}
+diff --git a/third_party/rust/proc-macro2-0.4.27/src/wrapper.rs b/third_party/rust/proc-macro2-0.4.27/src/wrapper.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/src/wrapper.rs
+@@ -0,0 +1,926 @@
++use std::fmt;
++use std::iter;
++use std::panic::{self, PanicInfo};
++#[cfg(super_unstable)]
++use std::path::PathBuf;
++use std::str::FromStr;
++
++use fallback;
++use proc_macro;
++
++use {Delimiter, Punct, Spacing, TokenTree};
++
++#[derive(Clone)]
++pub enum TokenStream {
++ Compiler(proc_macro::TokenStream),
++ Fallback(fallback::TokenStream),
++}
++
++pub enum LexError {
++ Compiler(proc_macro::LexError),
++ Fallback(fallback::LexError),
++}
++
++fn nightly_works() -> bool {
++ use std::sync::atomic::*;
++ use std::sync::Once;
++
++ static WORKS: AtomicUsize = ATOMIC_USIZE_INIT;
++ static INIT: Once = Once::new();
++
++ match WORKS.load(Ordering::SeqCst) {
++ 1 => return false,
++ 2 => return true,
++ _ => {}
++ }
++
++ // Swap in a null panic hook to avoid printing "thread panicked" to stderr,
++ // then use catch_unwind to determine whether the compiler's proc_macro is
++ // working. When proc-macro2 is used from outside of a procedural macro all
++ // of the proc_macro crate's APIs currently panic.
++ //
++ // The Once is to prevent the possibility of this ordering:
++ //
++ // thread 1 calls take_hook, gets the user's original hook
++ // thread 1 calls set_hook with the null hook
++ // thread 2 calls take_hook, thinks null hook is the original hook
++ // thread 2 calls set_hook with the null hook
++ // thread 1 calls set_hook with the actual original hook
++ // thread 2 calls set_hook with what it thinks is the original hook
++ //
++ // in which the user's hook has been lost.
++ //
++ // There is still a race condition where a panic in a different thread can
++ // happen during the interval that the user's original panic hook is
++ // unregistered such that their hook is incorrectly not called. This is
++ // sufficiently unlikely and less bad than printing panic messages to stderr
++ // on correct use of this crate. Maybe there is a libstd feature request
++ // here. For now, if a user needs to guarantee that this failure mode does
++ // not occur, they need to call e.g. `proc_macro2::Span::call_site()` from
++ // the main thread before launching any other threads.
++ INIT.call_once(|| {
++ type PanicHook = Fn(&PanicInfo) + Sync + Send + 'static;
++
++ let null_hook: Box<PanicHook> = Box::new(|_panic_info| { /* ignore */ });
++ let sanity_check = &*null_hook as *const PanicHook;
++ let original_hook = panic::take_hook();
++ panic::set_hook(null_hook);
++
++ let works = panic::catch_unwind(|| proc_macro::Span::call_site()).is_ok();
++ WORKS.store(works as usize + 1, Ordering::SeqCst);
++
++ let hopefully_null_hook = panic::take_hook();
++ panic::set_hook(original_hook);
++ if sanity_check != &*hopefully_null_hook {
++ panic!("observed race condition in proc_macro2::nightly_works");
++ }
++ });
++ nightly_works()
++}
++
++fn mismatch() -> ! {
++ panic!("stable/nightly mismatch")
++}
++
++impl TokenStream {
++ pub fn new() -> TokenStream {
++ if nightly_works() {
++ TokenStream::Compiler(proc_macro::TokenStream::new())
++ } else {
++ TokenStream::Fallback(fallback::TokenStream::new())
++ }
++ }
++
++ pub fn is_empty(&self) -> bool {
++ match self {
++ TokenStream::Compiler(tts) => tts.is_empty(),
++ TokenStream::Fallback(tts) => tts.is_empty(),
++ }
++ }
++
++ fn unwrap_nightly(self) -> proc_macro::TokenStream {
++ match self {
++ TokenStream::Compiler(s) => s,
++ TokenStream::Fallback(_) => mismatch(),
++ }
++ }
++
++ fn unwrap_stable(self) -> fallback::TokenStream {
++ match self {
++ TokenStream::Compiler(_) => mismatch(),
++ TokenStream::Fallback(s) => s,
++ }
++ }
++}
++
++impl FromStr for TokenStream {
++ type Err = LexError;
++
++ fn from_str(src: &str) -> Result<TokenStream, LexError> {
++ if nightly_works() {
++ Ok(TokenStream::Compiler(src.parse()?))
++ } else {
++ Ok(TokenStream::Fallback(src.parse()?))
++ }
++ }
++}
++
++impl fmt::Display for TokenStream {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ TokenStream::Compiler(tts) => tts.fmt(f),
++ TokenStream::Fallback(tts) => tts.fmt(f),
++ }
++ }
++}
++
++impl From<proc_macro::TokenStream> for TokenStream {
++ fn from(inner: proc_macro::TokenStream) -> TokenStream {
++ TokenStream::Compiler(inner)
++ }
++}
++
++impl From<TokenStream> for proc_macro::TokenStream {
++ fn from(inner: TokenStream) -> proc_macro::TokenStream {
++ match inner {
++ TokenStream::Compiler(inner) => inner,
++ TokenStream::Fallback(inner) => inner.to_string().parse().unwrap(),
++ }
++ }
++}
++
++impl From<fallback::TokenStream> for TokenStream {
++ fn from(inner: fallback::TokenStream) -> TokenStream {
++ TokenStream::Fallback(inner)
++ }
++}
++
++impl From<TokenTree> for TokenStream {
++ fn from(token: TokenTree) -> TokenStream {
++ if !nightly_works() {
++ return TokenStream::Fallback(token.into());
++ }
++ let tt: proc_macro::TokenTree = match token {
++ TokenTree::Group(tt) => tt.inner.unwrap_nightly().into(),
++ TokenTree::Punct(tt) => {
++ let spacing = match tt.spacing() {
++ Spacing::Joint => proc_macro::Spacing::Joint,
++ Spacing::Alone => proc_macro::Spacing::Alone,
++ };
++ let mut op = proc_macro::Punct::new(tt.as_char(), spacing);
++ op.set_span(tt.span().inner.unwrap_nightly());
++ op.into()
++ }
++ TokenTree::Ident(tt) => tt.inner.unwrap_nightly().into(),
++ TokenTree::Literal(tt) => tt.inner.unwrap_nightly().into(),
++ };
++ TokenStream::Compiler(tt.into())
++ }
++}
++
++impl iter::FromIterator<TokenTree> for TokenStream {
++ fn from_iter<I: IntoIterator<Item = TokenTree>>(trees: I) -> Self {
++ if nightly_works() {
++ let trees = trees
++ .into_iter()
++ .map(TokenStream::from)
++ .flat_map(|t| match t {
++ TokenStream::Compiler(s) => s,
++ TokenStream::Fallback(_) => mismatch(),
++ });
++ TokenStream::Compiler(trees.collect())
++ } else {
++ TokenStream::Fallback(trees.into_iter().collect())
++ }
++ }
++}
++
++impl iter::FromIterator<TokenStream> for TokenStream {
++ fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self {
++ let mut streams = streams.into_iter();
++ match streams.next() {
++ #[cfg(slow_extend)]
++ Some(TokenStream::Compiler(first)) => {
++ let stream = iter::once(first)
++ .chain(streams.map(|s| match s {
++ TokenStream::Compiler(s) => s,
++ TokenStream::Fallback(_) => mismatch(),
++ }))
++ .collect();
++ TokenStream::Compiler(stream)
++ }
++ #[cfg(not(slow_extend))]
++ Some(TokenStream::Compiler(mut first)) => {
++ first.extend(streams.map(|s| match s {
++ TokenStream::Compiler(s) => s,
++ TokenStream::Fallback(_) => mismatch(),
++ }));
++ TokenStream::Compiler(first)
++ }
++ Some(TokenStream::Fallback(mut first)) => {
++ first.extend(streams.map(|s| match s {
++ TokenStream::Fallback(s) => s,
++ TokenStream::Compiler(_) => mismatch(),
++ }));
++ TokenStream::Fallback(first)
++ }
++ None => TokenStream::new(),
++ }
++ }
++}
++
++impl Extend<TokenTree> for TokenStream {
++ fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) {
++ match self {
++ TokenStream::Compiler(tts) => {
++ #[cfg(not(slow_extend))]
++ {
++ tts.extend(
++ streams
++ .into_iter()
++ .map(|t| TokenStream::from(t).unwrap_nightly()),
++ );
++ }
++ #[cfg(slow_extend)]
++ {
++ *tts =
++ tts.clone()
++ .into_iter()
++ .chain(streams.into_iter().map(TokenStream::from).flat_map(
++ |t| match t {
++ TokenStream::Compiler(tts) => tts.into_iter(),
++ _ => mismatch(),
++ },
++ ))
++ .collect();
++ }
++ }
++ TokenStream::Fallback(tts) => tts.extend(streams),
++ }
++ }
++}
++
++impl Extend<TokenStream> for TokenStream {
++ fn extend<I: IntoIterator<Item = TokenStream>>(&mut self, streams: I) {
++ match self {
++ TokenStream::Compiler(tts) => {
++ #[cfg(not(slow_extend))]
++ {
++ tts.extend(streams.into_iter().map(|stream| stream.unwrap_nightly()));
++ }
++ #[cfg(slow_extend)]
++ {
++ *tts = tts
++ .clone()
++ .into_iter()
++ .chain(streams.into_iter().flat_map(|t| match t {
++ TokenStream::Compiler(tts) => tts.into_iter(),
++ _ => mismatch(),
++ }))
++ .collect();
++ }
++ }
++ TokenStream::Fallback(tts) => {
++ tts.extend(streams.into_iter().map(|stream| stream.unwrap_stable()))
++ }
++ }
++ }
++}
++
++impl fmt::Debug for TokenStream {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ TokenStream::Compiler(tts) => tts.fmt(f),
++ TokenStream::Fallback(tts) => tts.fmt(f),
++ }
++ }
++}
++
++impl From<proc_macro::LexError> for LexError {
++ fn from(e: proc_macro::LexError) -> LexError {
++ LexError::Compiler(e)
++ }
++}
++
++impl From<fallback::LexError> for LexError {
++ fn from(e: fallback::LexError) -> LexError {
++ LexError::Fallback(e)
++ }
++}
++
++impl fmt::Debug for LexError {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ LexError::Compiler(e) => e.fmt(f),
++ LexError::Fallback(e) => e.fmt(f),
++ }
++ }
++}
++
++pub enum TokenTreeIter {
++ Compiler(proc_macro::token_stream::IntoIter),
++ Fallback(fallback::TokenTreeIter),
++}
++
++impl IntoIterator for TokenStream {
++ type Item = TokenTree;
++ type IntoIter = TokenTreeIter;
++
++ fn into_iter(self) -> TokenTreeIter {
++ match self {
++ TokenStream::Compiler(tts) => TokenTreeIter::Compiler(tts.into_iter()),
++ TokenStream::Fallback(tts) => TokenTreeIter::Fallback(tts.into_iter()),
++ }
++ }
++}
++
++impl Iterator for TokenTreeIter {
++ type Item = TokenTree;
++
++ fn next(&mut self) -> Option<TokenTree> {
++ let token = match self {
++ TokenTreeIter::Compiler(iter) => iter.next()?,
++ TokenTreeIter::Fallback(iter) => return iter.next(),
++ };
++ Some(match token {
++ proc_macro::TokenTree::Group(tt) => ::Group::_new(Group::Compiler(tt)).into(),
++ proc_macro::TokenTree::Punct(tt) => {
++ let spacing = match tt.spacing() {
++ proc_macro::Spacing::Joint => Spacing::Joint,
++ proc_macro::Spacing::Alone => Spacing::Alone,
++ };
++ let mut o = Punct::new(tt.as_char(), spacing);
++ o.set_span(::Span::_new(Span::Compiler(tt.span())));
++ o.into()
++ }
++ proc_macro::TokenTree::Ident(s) => ::Ident::_new(Ident::Compiler(s)).into(),
++ proc_macro::TokenTree::Literal(l) => ::Literal::_new(Literal::Compiler(l)).into(),
++ })
++ }
++
++ fn size_hint(&self) -> (usize, Option<usize>) {
++ match self {
++ TokenTreeIter::Compiler(tts) => tts.size_hint(),
++ TokenTreeIter::Fallback(tts) => tts.size_hint(),
++ }
++ }
++}
++
++impl fmt::Debug for TokenTreeIter {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ f.debug_struct("TokenTreeIter").finish()
++ }
++}
++
++#[derive(Clone, PartialEq, Eq)]
++#[cfg(super_unstable)]
++pub enum SourceFile {
++ Compiler(proc_macro::SourceFile),
++ Fallback(fallback::SourceFile),
++}
++
++#[cfg(super_unstable)]
++impl SourceFile {
++ fn nightly(sf: proc_macro::SourceFile) -> Self {
++ SourceFile::Compiler(sf)
++ }
++
++ /// Get the path to this source file as a string.
++ pub fn path(&self) -> PathBuf {
++ match self {
++ SourceFile::Compiler(a) => a.path(),
++ SourceFile::Fallback(a) => a.path(),
++ }
++ }
++
++ pub fn is_real(&self) -> bool {
++ match self {
++ SourceFile::Compiler(a) => a.is_real(),
++ SourceFile::Fallback(a) => a.is_real(),
++ }
++ }
++}
++
++#[cfg(super_unstable)]
++impl fmt::Debug for SourceFile {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ SourceFile::Compiler(a) => a.fmt(f),
++ SourceFile::Fallback(a) => a.fmt(f),
++ }
++ }
++}
++
++#[cfg(any(super_unstable, feature = "span-locations"))]
++pub struct LineColumn {
++ pub line: usize,
++ pub column: usize,
++}
++
++#[derive(Copy, Clone)]
++pub enum Span {
++ Compiler(proc_macro::Span),
++ Fallback(fallback::Span),
++}
++
++impl Span {
++ pub fn call_site() -> Span {
++ if nightly_works() {
++ Span::Compiler(proc_macro::Span::call_site())
++ } else {
++ Span::Fallback(fallback::Span::call_site())
++ }
++ }
++
++ #[cfg(super_unstable)]
++ pub fn def_site() -> Span {
++ if nightly_works() {
++ Span::Compiler(proc_macro::Span::def_site())
++ } else {
++ Span::Fallback(fallback::Span::def_site())
++ }
++ }
++
++ #[cfg(super_unstable)]
++ pub fn resolved_at(&self, other: Span) -> Span {
++ match (self, other) {
++ (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.resolved_at(b)),
++ (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.resolved_at(b)),
++ _ => mismatch(),
++ }
++ }
++
++ #[cfg(super_unstable)]
++ pub fn located_at(&self, other: Span) -> Span {
++ match (self, other) {
++ (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.located_at(b)),
++ (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.located_at(b)),
++ _ => mismatch(),
++ }
++ }
++
++ pub fn unwrap(self) -> proc_macro::Span {
++ match self {
++ Span::Compiler(s) => s,
++ Span::Fallback(_) => panic!("proc_macro::Span is only available in procedural macros"),
++ }
++ }
++
++ #[cfg(super_unstable)]
++ pub fn source_file(&self) -> SourceFile {
++ match self {
++ Span::Compiler(s) => SourceFile::nightly(s.source_file()),
++ Span::Fallback(s) => SourceFile::Fallback(s.source_file()),
++ }
++ }
++
++ #[cfg(any(super_unstable, feature = "span-locations"))]
++ pub fn start(&self) -> LineColumn {
++ match self {
++ #[cfg(nightly)]
++ Span::Compiler(s) => {
++ let proc_macro::LineColumn { line, column } = s.start();
++ LineColumn { line, column }
++ }
++ #[cfg(not(nightly))]
++ Span::Compiler(_) => LineColumn { line: 0, column: 0 },
++ Span::Fallback(s) => {
++ let fallback::LineColumn { line, column } = s.start();
++ LineColumn { line, column }
++ }
++ }
++ }
++
++ #[cfg(any(super_unstable, feature = "span-locations"))]
++ pub fn end(&self) -> LineColumn {
++ match self {
++ #[cfg(nightly)]
++ Span::Compiler(s) => {
++ let proc_macro::LineColumn { line, column } = s.end();
++ LineColumn { line, column }
++ }
++ #[cfg(not(nightly))]
++ Span::Compiler(_) => LineColumn { line: 0, column: 0 },
++ Span::Fallback(s) => {
++ let fallback::LineColumn { line, column } = s.end();
++ LineColumn { line, column }
++ }
++ }
++ }
++
++ #[cfg(super_unstable)]
++ pub fn join(&self, other: Span) -> Option<Span> {
++ let ret = match (self, other) {
++ (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.join(b)?),
++ (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.join(b)?),
++ _ => return None,
++ };
++ Some(ret)
++ }
++
++ #[cfg(super_unstable)]
++ pub fn eq(&self, other: &Span) -> bool {
++ match (self, other) {
++ (Span::Compiler(a), Span::Compiler(b)) => a.eq(b),
++ (Span::Fallback(a), Span::Fallback(b)) => a.eq(b),
++ _ => false,
++ }
++ }
++
++ fn unwrap_nightly(self) -> proc_macro::Span {
++ match self {
++ Span::Compiler(s) => s,
++ Span::Fallback(_) => mismatch(),
++ }
++ }
++}
++
++impl From<proc_macro::Span> for ::Span {
++ fn from(proc_span: proc_macro::Span) -> ::Span {
++ ::Span::_new(Span::Compiler(proc_span))
++ }
++}
++
++impl From<fallback::Span> for Span {
++ fn from(inner: fallback::Span) -> Span {
++ Span::Fallback(inner)
++ }
++}
++
++impl fmt::Debug for Span {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ Span::Compiler(s) => s.fmt(f),
++ Span::Fallback(s) => s.fmt(f),
++ }
++ }
++}
++
++pub fn debug_span_field_if_nontrivial(debug: &mut fmt::DebugStruct, span: Span) {
++ match span {
++ Span::Compiler(s) => {
++ debug.field("span", &s);
++ }
++ Span::Fallback(s) => fallback::debug_span_field_if_nontrivial(debug, s),
++ }
++}
++
++#[derive(Clone)]
++pub enum Group {
++ Compiler(proc_macro::Group),
++ Fallback(fallback::Group),
++}
++
++impl Group {
++ pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group {
++ match stream {
++ TokenStream::Compiler(stream) => {
++ let delimiter = match delimiter {
++ Delimiter::Parenthesis => proc_macro::Delimiter::Parenthesis,
++ Delimiter::Bracket => proc_macro::Delimiter::Bracket,
++ Delimiter::Brace => proc_macro::Delimiter::Brace,
++ Delimiter::None => proc_macro::Delimiter::None,
++ };
++ Group::Compiler(proc_macro::Group::new(delimiter, stream))
++ }
++ TokenStream::Fallback(stream) => {
++ Group::Fallback(fallback::Group::new(delimiter, stream))
++ }
++ }
++ }
++
++ pub fn delimiter(&self) -> Delimiter {
++ match self {
++ Group::Compiler(g) => match g.delimiter() {
++ proc_macro::Delimiter::Parenthesis => Delimiter::Parenthesis,
++ proc_macro::Delimiter::Bracket => Delimiter::Bracket,
++ proc_macro::Delimiter::Brace => Delimiter::Brace,
++ proc_macro::Delimiter::None => Delimiter::None,
++ },
++ Group::Fallback(g) => g.delimiter(),
++ }
++ }
++
++ pub fn stream(&self) -> TokenStream {
++ match self {
++ Group::Compiler(g) => TokenStream::Compiler(g.stream()),
++ Group::Fallback(g) => TokenStream::Fallback(g.stream()),
++ }
++ }
++
++ pub fn span(&self) -> Span {
++ match self {
++ Group::Compiler(g) => Span::Compiler(g.span()),
++ Group::Fallback(g) => Span::Fallback(g.span()),
++ }
++ }
++
++ #[cfg(super_unstable)]
++ pub fn span_open(&self) -> Span {
++ match self {
++ Group::Compiler(g) => Span::Compiler(g.span_open()),
++ Group::Fallback(g) => Span::Fallback(g.span_open()),
++ }
++ }
++
++ #[cfg(super_unstable)]
++ pub fn span_close(&self) -> Span {
++ match self {
++ Group::Compiler(g) => Span::Compiler(g.span_close()),
++ Group::Fallback(g) => Span::Fallback(g.span_close()),
++ }
++ }
++
++ pub fn set_span(&mut self, span: Span) {
++ match (self, span) {
++ (Group::Compiler(g), Span::Compiler(s)) => g.set_span(s),
++ (Group::Fallback(g), Span::Fallback(s)) => g.set_span(s),
++ _ => mismatch(),
++ }
++ }
++
++ fn unwrap_nightly(self) -> proc_macro::Group {
++ match self {
++ Group::Compiler(g) => g,
++ Group::Fallback(_) => mismatch(),
++ }
++ }
++}
++
++impl From<fallback::Group> for Group {
++ fn from(g: fallback::Group) -> Self {
++ Group::Fallback(g)
++ }
++}
++
++impl fmt::Display for Group {
++ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ Group::Compiler(group) => group.fmt(formatter),
++ Group::Fallback(group) => group.fmt(formatter),
++ }
++ }
++}
++
++impl fmt::Debug for Group {
++ fn fmt(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ Group::Compiler(group) => group.fmt(formatter),
++ Group::Fallback(group) => group.fmt(formatter),
++ }
++ }
++}
++
++#[derive(Clone)]
++pub enum Ident {
++ Compiler(proc_macro::Ident),
++ Fallback(fallback::Ident),
++}
++
++impl Ident {
++ pub fn new(string: &str, span: Span) -> Ident {
++ match span {
++ Span::Compiler(s) => Ident::Compiler(proc_macro::Ident::new(string, s)),
++ Span::Fallback(s) => Ident::Fallback(fallback::Ident::new(string, s)),
++ }
++ }
++
++ pub fn new_raw(string: &str, span: Span) -> Ident {
++ match span {
++ Span::Compiler(s) => {
++ let p: proc_macro::TokenStream = string.parse().unwrap();
++ let ident = match p.into_iter().next() {
++ Some(proc_macro::TokenTree::Ident(mut i)) => {
++ i.set_span(s);
++ i
++ }
++ _ => panic!(),
++ };
++ Ident::Compiler(ident)
++ }
++ Span::Fallback(s) => Ident::Fallback(fallback::Ident::new_raw(string, s)),
++ }
++ }
++
++ pub fn span(&self) -> Span {
++ match self {
++ Ident::Compiler(t) => Span::Compiler(t.span()),
++ Ident::Fallback(t) => Span::Fallback(t.span()),
++ }
++ }
++
++ pub fn set_span(&mut self, span: Span) {
++ match (self, span) {
++ (Ident::Compiler(t), Span::Compiler(s)) => t.set_span(s),
++ (Ident::Fallback(t), Span::Fallback(s)) => t.set_span(s),
++ _ => mismatch(),
++ }
++ }
++
++ fn unwrap_nightly(self) -> proc_macro::Ident {
++ match self {
++ Ident::Compiler(s) => s,
++ Ident::Fallback(_) => mismatch(),
++ }
++ }
++}
++
++impl PartialEq for Ident {
++ fn eq(&self, other: &Ident) -> bool {
++ match (self, other) {
++ (Ident::Compiler(t), Ident::Compiler(o)) => t.to_string() == o.to_string(),
++ (Ident::Fallback(t), Ident::Fallback(o)) => t == o,
++ _ => mismatch(),
++ }
++ }
++}
++
++impl<T> PartialEq<T> for Ident
++where
++ T: ?Sized + AsRef<str>,
++{
++ fn eq(&self, other: &T) -> bool {
++ let other = other.as_ref();
++ match self {
++ Ident::Compiler(t) => t.to_string() == other,
++ Ident::Fallback(t) => t == other,
++ }
++ }
++}
++
++impl fmt::Display for Ident {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ Ident::Compiler(t) => t.fmt(f),
++ Ident::Fallback(t) => t.fmt(f),
++ }
++ }
++}
++
++impl fmt::Debug for Ident {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ Ident::Compiler(t) => t.fmt(f),
++ Ident::Fallback(t) => t.fmt(f),
++ }
++ }
++}
++
++#[derive(Clone)]
++pub enum Literal {
++ Compiler(proc_macro::Literal),
++ Fallback(fallback::Literal),
++}
++
++macro_rules! suffixed_numbers {
++ ($($name:ident => $kind:ident,)*) => ($(
++ pub fn $name(n: $kind) -> Literal {
++ if nightly_works() {
++ Literal::Compiler(proc_macro::Literal::$name(n))
++ } else {
++ Literal::Fallback(fallback::Literal::$name(n))
++ }
++ }
++ )*)
++}
++
++macro_rules! unsuffixed_integers {
++ ($($name:ident => $kind:ident,)*) => ($(
++ pub fn $name(n: $kind) -> Literal {
++ if nightly_works() {
++ Literal::Compiler(proc_macro::Literal::$name(n))
++ } else {
++ Literal::Fallback(fallback::Literal::$name(n))
++ }
++ }
++ )*)
++}
++
++impl Literal {
++ suffixed_numbers! {
++ u8_suffixed => u8,
++ u16_suffixed => u16,
++ u32_suffixed => u32,
++ u64_suffixed => u64,
++ usize_suffixed => usize,
++ i8_suffixed => i8,
++ i16_suffixed => i16,
++ i32_suffixed => i32,
++ i64_suffixed => i64,
++ isize_suffixed => isize,
++
++ f32_suffixed => f32,
++ f64_suffixed => f64,
++ }
++
++ #[cfg(u128)]
++ suffixed_numbers! {
++ i128_suffixed => i128,
++ u128_suffixed => u128,
++ }
++
++ unsuffixed_integers! {
++ u8_unsuffixed => u8,
++ u16_unsuffixed => u16,
++ u32_unsuffixed => u32,
++ u64_unsuffixed => u64,
++ usize_unsuffixed => usize,
++ i8_unsuffixed => i8,
++ i16_unsuffixed => i16,
++ i32_unsuffixed => i32,
++ i64_unsuffixed => i64,
++ isize_unsuffixed => isize,
++ }
++
++ #[cfg(u128)]
++ unsuffixed_integers! {
++ i128_unsuffixed => i128,
++ u128_unsuffixed => u128,
++ }
++
++ pub fn f32_unsuffixed(f: f32) -> Literal {
++ if nightly_works() {
++ Literal::Compiler(proc_macro::Literal::f32_unsuffixed(f))
++ } else {
++ Literal::Fallback(fallback::Literal::f32_unsuffixed(f))
++ }
++ }
++
++ pub fn f64_unsuffixed(f: f64) -> Literal {
++ if nightly_works() {
++ Literal::Compiler(proc_macro::Literal::f64_unsuffixed(f))
++ } else {
++ Literal::Fallback(fallback::Literal::f64_unsuffixed(f))
++ }
++ }
++
++ pub fn string(t: &str) -> Literal {
++ if nightly_works() {
++ Literal::Compiler(proc_macro::Literal::string(t))
++ } else {
++ Literal::Fallback(fallback::Literal::string(t))
++ }
++ }
++
++ pub fn character(t: char) -> Literal {
++ if nightly_works() {
++ Literal::Compiler(proc_macro::Literal::character(t))
++ } else {
++ Literal::Fallback(fallback::Literal::character(t))
++ }
++ }
++
++ pub fn byte_string(bytes: &[u8]) -> Literal {
++ if nightly_works() {
++ Literal::Compiler(proc_macro::Literal::byte_string(bytes))
++ } else {
++ Literal::Fallback(fallback::Literal::byte_string(bytes))
++ }
++ }
++
++ pub fn span(&self) -> Span {
++ match self {
++ Literal::Compiler(lit) => Span::Compiler(lit.span()),
++ Literal::Fallback(lit) => Span::Fallback(lit.span()),
++ }
++ }
++
++ pub fn set_span(&mut self, span: Span) {
++ match (self, span) {
++ (Literal::Compiler(lit), Span::Compiler(s)) => lit.set_span(s),
++ (Literal::Fallback(lit), Span::Fallback(s)) => lit.set_span(s),
++ _ => mismatch(),
++ }
++ }
++
++ fn unwrap_nightly(self) -> proc_macro::Literal {
++ match self {
++ Literal::Compiler(s) => s,
++ Literal::Fallback(_) => mismatch(),
++ }
++ }
++}
++
++impl From<fallback::Literal> for Literal {
++ fn from(s: fallback::Literal) -> Literal {
++ Literal::Fallback(s)
++ }
++}
++
++impl fmt::Display for Literal {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ Literal::Compiler(t) => t.fmt(f),
++ Literal::Fallback(t) => t.fmt(f),
++ }
++ }
++}
++
++impl fmt::Debug for Literal {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ match self {
++ Literal::Compiler(t) => t.fmt(f),
++ Literal::Fallback(t) => t.fmt(f),
++ }
++ }
++}
+diff --git a/third_party/rust/proc-macro2-0.4.27/tests/marker.rs b/third_party/rust/proc-macro2-0.4.27/tests/marker.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/tests/marker.rs
+@@ -0,0 +1,61 @@
++extern crate proc_macro2;
++
++use proc_macro2::*;
++
++macro_rules! assert_impl {
++ ($ty:ident is $($marker:ident) and +) => {
++ #[test]
++ #[allow(non_snake_case)]
++ fn $ty() {
++ fn assert_implemented<T: $($marker +)+>() {}
++ assert_implemented::<$ty>();
++ }
++ };
++
++ ($ty:ident is not $($marker:ident) or +) => {
++ #[test]
++ #[allow(non_snake_case)]
++ fn $ty() {
++ $(
++ {
++ // Implemented for types that implement $marker.
++ trait IsNotImplemented {
++ fn assert_not_implemented() {}
++ }
++ impl<T: $marker> IsNotImplemented for T {}
++
++ // Implemented for the type being tested.
++ trait IsImplemented {
++ fn assert_not_implemented() {}
++ }
++ impl IsImplemented for $ty {}
++
++ // If $ty does not implement $marker, there is no ambiguity
++ // in the following trait method call.
++ <$ty>::assert_not_implemented();
++ }
++ )+
++ }
++ };
++}
++
++assert_impl!(Delimiter is Send and Sync);
++assert_impl!(Spacing is Send and Sync);
++
++assert_impl!(Group is not Send or Sync);
++assert_impl!(Ident is not Send or Sync);
++assert_impl!(LexError is not Send or Sync);
++assert_impl!(Literal is not Send or Sync);
++assert_impl!(Punct is not Send or Sync);
++assert_impl!(Span is not Send or Sync);
++assert_impl!(TokenStream is not Send or Sync);
++assert_impl!(TokenTree is not Send or Sync);
++
++#[cfg(procmacro2_semver_exempt)]
++mod semver_exempt {
++ use super::*;
++
++ assert_impl!(LineColumn is Send and Sync);
++
++ assert_impl!(SourceFile is not Send or Sync);
++}
+diff --git a/third_party/rust/proc-macro2-0.4.27/tests/test.rs b/third_party/rust/proc-macro2-0.4.27/tests/test.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2-0.4.27/tests/test.rs
+@@ -0,0 +1,389 @@
++extern crate proc_macro2;
++
++use std::str::{self, FromStr};
++
++use proc_macro2::{Ident, Literal, Spacing, Span, TokenStream, TokenTree};
++
++#[test]
++fn terms() {
++ assert_eq!(
++ Ident::new("String", Span::call_site()).to_string(),
++ "String"
++ );
++ assert_eq!(Ident::new("fn", Span::call_site()).to_string(), "fn");
++ assert_eq!(Ident::new("_", Span::call_site()).to_string(), "_");
++}
++
++#[test]
++#[cfg(procmacro2_semver_exempt)]
++fn raw_terms() {
++ assert_eq!(
++ Ident::new_raw("String", Span::call_site()).to_string(),
++ "r#String"
++ );
++ assert_eq!(Ident::new_raw("fn", Span::call_site()).to_string(), "r#fn");
++ assert_eq!(Ident::new_raw("_", Span::call_site()).to_string(), "r#_");
++}
++
++#[test]
++#[should_panic(expected = "Ident is not allowed to be empty; use Option<Ident>")]
++fn term_empty() {
++ Ident::new("", Span::call_site());
++}
++
++#[test]
++#[should_panic(expected = "Ident cannot be a number; use Literal instead")]
++fn term_number() {
++ Ident::new("255", Span::call_site());
++}
++
++#[test]
++#[should_panic(expected = "\"a#\" is not a valid Ident")]
++fn term_invalid() {
++ Ident::new("a#", Span::call_site());
++}
++
++#[test]
++#[should_panic(expected = "not a valid Ident")]
++fn raw_term_empty() {
++ Ident::new("r#", Span::call_site());
++}
++
++#[test]
++#[should_panic(expected = "not a valid Ident")]
++fn raw_term_number() {
++ Ident::new("r#255", Span::call_site());
++}
++
++#[test]
++#[should_panic(expected = "\"r#a#\" is not a valid Ident")]
++fn raw_term_invalid() {
++ Ident::new("r#a#", Span::call_site());
++}
++
++#[test]
++#[should_panic(expected = "not a valid Ident")]
++fn lifetime_empty() {
++ Ident::new("'", Span::call_site());
++}
++
++#[test]
++#[should_panic(expected = "not a valid Ident")]
++fn lifetime_number() {
++ Ident::new("'255", Span::call_site());
++}
++
++#[test]
++#[should_panic(expected = r#""\'a#" is not a valid Ident"#)]
++fn lifetime_invalid() {
++ Ident::new("'a#", Span::call_site());
++}
++
++#[test]
++fn literals() {
++ assert_eq!(Literal::string("foo").to_string(), "\"foo\"");
++ assert_eq!(Literal::string("\"").to_string(), "\"\\\"\"");
++ assert_eq!(Literal::f32_unsuffixed(10.0).to_string(), "10.0");
++}
++
++#[test]
++fn roundtrip() {
++ fn roundtrip(p: &str) {
++ println!("parse: {}", p);
++ let s = p.parse::<TokenStream>().unwrap().to_string();
++ println!("first: {}", s);
++ let s2 = s.to_string().parse::<TokenStream>().unwrap().to_string();
++ assert_eq!(s, s2);
++ }
++ roundtrip("a");
++ roundtrip("<<");
++ roundtrip("<<=");
++ roundtrip(
++ "
++ 1
++ 1.0
++ 1f32
++ 2f64
++ 1usize
++ 4isize
++ 4e10
++ 1_000
++ 1_0i32
++ 8u8
++ 9
++ 0
++ 0xffffffffffffffffffffffffffffffff
++ ",
++ );
++ roundtrip("'a");
++ roundtrip("'_");
++ roundtrip("'static");
++ roundtrip("'\\u{10__FFFF}'");
++ roundtrip("\"\\u{10_F0FF__}foo\\u{1_0_0_0__}\"");
++}
++
++#[test]
++fn fail() {
++ fn fail(p: &str) {
++ if let Ok(s) = p.parse::<TokenStream>() {
++ panic!("should have failed to parse: {}\n{:#?}", p, s);
++ }
++ }
++ fail("1x");
++ fail("1u80");
++ fail("1f320");
++ fail("' static");
++ fail("r#1");
++ fail("r#_");
++}
++
++#[cfg(span_locations)]
++#[test]
++fn span_test() {
++ use proc_macro2::TokenTree;
++
++ fn check_spans(p: &str, mut lines: &[(usize, usize, usize, usize)]) {
++ let ts = p.parse::<TokenStream>().unwrap();
++ check_spans_internal(ts, &mut lines);
++ }
++
++ fn check_spans_internal(ts: TokenStream, lines: &mut &[(usize, usize, usize, usize)]) {
++ for i in ts {
++ if let Some((&(sline, scol, eline, ecol), rest)) = lines.split_first() {
++ *lines = rest;
++
++ let start = i.span().start();
++ assert_eq!(start.line, sline, "sline did not match for {}", i);
++ assert_eq!(start.column, scol, "scol did not match for {}", i);
++
++ let end = i.span().end();
++ assert_eq!(end.line, eline, "eline did not match for {}", i);
++ assert_eq!(end.column, ecol, "ecol did not match for {}", i);
++
++ match i {
++ TokenTree::Group(ref g) => {
++ check_spans_internal(g.stream().clone(), lines);
++ }
++ _ => {}
++ }
++ }
++ }
++ }
++
++ check_spans(
++ "\
++/// This is a document comment
++testing 123
++{
++ testing 234
++}",
++ &[
++ (1, 0, 1, 30), // #
++ (1, 0, 1, 30), // [ ... ]
++ (1, 0, 1, 30), // doc
++ (1, 0, 1, 30), // =
++ (1, 0, 1, 30), // "This is..."
++ (2, 0, 2, 7), // testing
++ (2, 8, 2, 11), // 123
++ (3, 0, 5, 1), // { ... }
++ (4, 2, 4, 9), // testing
++ (4, 10, 4, 13), // 234
++ ],
++ );
++}
++
++#[cfg(procmacro2_semver_exempt)]
++#[cfg(not(nightly))]
++#[test]
++fn default_span() {
++ let start = Span::call_site().start();
++ assert_eq!(start.line, 1);
++ assert_eq!(start.column, 0);
++ let end = Span::call_site().end();
++ assert_eq!(end.line, 1);
++ assert_eq!(end.column, 0);
++ let source_file = Span::call_site().source_file();
++ assert_eq!(source_file.path().to_string_lossy(), "<unspecified>");
++ assert!(!source_file.is_real());
++}
++
++#[cfg(procmacro2_semver_exempt)]
++#[test]
++fn span_join() {
++ let source1 = "aaa\nbbb"
++ .parse::<TokenStream>()
++ .unwrap()
++ .into_iter()
++ .collect::<Vec<_>>();
++ let source2 = "ccc\nddd"
++ .parse::<TokenStream>()
++ .unwrap()
++ .into_iter()
++ .collect::<Vec<_>>();
++
++ assert!(source1[0].span().source_file() != source2[0].span().source_file());
++ assert_eq!(
++ source1[0].span().source_file(),
++ source1[1].span().source_file()
++ );
++
++ let joined1 = source1[0].span().join(source1[1].span());
++ let joined2 = source1[0].span().join(source2[0].span());
++ assert!(joined1.is_some());
++ assert!(joined2.is_none());
++
++ let start = joined1.unwrap().start();
++ let end = joined1.unwrap().end();
++ assert_eq!(start.line, 1);
++ assert_eq!(start.column, 0);
++ assert_eq!(end.line, 2);
++ assert_eq!(end.column, 3);
++
++ assert_eq!(
++ joined1.unwrap().source_file(),
++ source1[0].span().source_file()
++ );
++}
++
++#[test]
++fn no_panic() {
++ let s = str::from_utf8(b"b\'\xc2\x86 \x00\x00\x00^\"").unwrap();
++ assert!(s.parse::<proc_macro2::TokenStream>().is_err());
++}
++
++#[test]
++fn tricky_doc_comment() {
++ let stream = "/**/".parse::<proc_macro2::TokenStream>().unwrap();
++ let tokens = stream.into_iter().collect::<Vec<_>>();
++ assert!(tokens.is_empty(), "not empty -- {:?}", tokens);
++
++ let stream = "/// doc".parse::<proc_macro2::TokenStream>().unwrap();
++ let tokens = stream.into_iter().collect::<Vec<_>>();
++ assert!(tokens.len() == 2, "not length 2 -- {:?}", tokens);
++ match tokens[0] {
++ proc_macro2::TokenTree::Punct(ref tt) => assert_eq!(tt.as_char(), '#'),
++ _ => panic!("wrong token {:?}", tokens[0]),
++ }
++ let mut tokens = match tokens[1] {
++ proc_macro2::TokenTree::Group(ref tt) => {
++ assert_eq!(tt.delimiter(), proc_macro2::Delimiter::Bracket);
++ tt.stream().into_iter()
++ }
++ _ => panic!("wrong token {:?}", tokens[0]),
++ };
++
++ match tokens.next().unwrap() {
++ proc_macro2::TokenTree::Ident(ref tt) => assert_eq!(tt.to_string(), "doc"),
++ t => panic!("wrong token {:?}", t),
++ }
++ match tokens.next().unwrap() {
++ proc_macro2::TokenTree::Punct(ref tt) => assert_eq!(tt.as_char(), '='),
++ t => panic!("wrong token {:?}", t),
++ }
++ match tokens.next().unwrap() {
++ proc_macro2::TokenTree::Literal(ref tt) => {
++ assert_eq!(tt.to_string(), "\" doc\"");
++ }
++ t => panic!("wrong token {:?}", t),
++ }
++ assert!(tokens.next().is_none());
++
++ let stream = "//! doc".parse::<proc_macro2::TokenStream>().unwrap();
++ let tokens = stream.into_iter().collect::<Vec<_>>();
++ assert!(tokens.len() == 3, "not length 3 -- {:?}", tokens);
++}
++
++#[test]
++fn op_before_comment() {
++ let mut tts = TokenStream::from_str("~// comment").unwrap().into_iter();
++ match tts.next().unwrap() {
++ TokenTree::Punct(tt) => {
++ assert_eq!(tt.as_char(), '~');
++ assert_eq!(tt.spacing(), Spacing::Alone);
++ }
++ wrong => panic!("wrong token {:?}", wrong),
++ }
++}
++
++#[test]
++fn raw_identifier() {
++ let mut tts = TokenStream::from_str("r#dyn").unwrap().into_iter();
++ match tts.next().unwrap() {
++ TokenTree::Ident(raw) => assert_eq!("r#dyn", raw.to_string()),
++ wrong => panic!("wrong token {:?}", wrong),
++ }
++ assert!(tts.next().is_none());
++}
++
++#[test]
++fn test_debug_ident() {
++ let ident = Ident::new("proc_macro", Span::call_site());
++
++ #[cfg(not(procmacro2_semver_exempt))]
++ let expected = "Ident(proc_macro)";
++
++ #[cfg(procmacro2_semver_exempt)]
++ let expected = "Ident { sym: proc_macro, span: bytes(0..0) }";
++
++ assert_eq!(expected, format!("{:?}", ident));
++}
++
++#[test]
++fn test_debug_tokenstream() {
++ let tts = TokenStream::from_str("[a + 1]").unwrap();
++
++ #[cfg(not(procmacro2_semver_exempt))]
++ let expected = "\
++TokenStream [
++ Group {
++ delimiter: Bracket,
++ stream: TokenStream [
++ Ident {
++ sym: a
++ },
++ Punct {
++ op: '+',
++ spacing: Alone
++ },
++ Literal {
++ lit: 1
++ }
++ ]
++ }
++]\
++ ";
++
++ #[cfg(procmacro2_semver_exempt)]
++ let expected = "\
++TokenStream [
++ Group {
++ delimiter: Bracket,
++ stream: TokenStream [
++ Ident {
++ sym: a,
++ span: bytes(2..3)
++ },
++ Punct {
++ op: '+',
++ spacing: Alone,
++ span: bytes(4..5)
++ },
++ Literal {
++ lit: 1,
++ span: bytes(6..7)
++ }
++ ],
++ span: bytes(1..8)
++ }
++]\
++ ";
++
++ assert_eq!(expected, format!("{:#?}", tts));
++}
++
++#[test]
++fn default_tokenstream_is_empty() {
++ let default_token_stream: TokenStream = Default::default();
++
++ assert!(default_token_stream.is_empty());
++}
+diff --git a/third_party/rust/proc-macro2/.cargo-checksum.json b/third_party/rust/proc-macro2/.cargo-checksum.json
+--- a/third_party/rust/proc-macro2/.cargo-checksum.json
++++ b/third_party/rust/proc-macro2/.cargo-checksum.json
+@@ -1 +1 @@
+-{"files":{"Cargo.toml":"b523856472549844b4bf20eca0473d955a7e5eeb95c70eddd31a05ac455427bb","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"89857eaaa305afe540abcf56fabae0194dfb4e7906a8098b7206acb23ed11ce8","build.rs":"36fa668f3bf309f243d0e977e8428446cc424303139c1f63410b3c2e30445aec","src/fallback.rs":"e4d1bcb1e92383a2285e6c947dd74b0e34144904948db68127faea627f5dd6ff","src/lib.rs":"896a1d212e30902ff051313808007406ca4471c27880a6ef19508f0ebb8333ee","src/strnom.rs":"60f5380106dbe568cca7abd09877e133c874fbee95d502e4830425c4613a640d","src/wrapper.rs":"0d7fe28ab2b7ee02b8eb8c5a636da364c60f6704b23e7db0a1ddd57c742f54b1","tests/marker.rs":"0227d07bbc7f2e2ad34662a6acb65668b7dc2f79141c4faa672703a04e27bea0","tests/test.rs":"166d35835355bdaa85bcf69de4dfb56ccddd8acf2e1a8cbc506782632b151674"},"package":"4d317f9caece796be1980837fd5cb3dfec5613ebdb04ad0956deea83ce168915"}
+\ No newline at end of file
++{"files":{"Cargo.toml":"9d18d9cad8a90dd6eb3f9ff06357a9f9a93fdb4697445bbdb4b77be361377708","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"378f5840b258e2779c39418f3f2d7b2ba96f1c7917dd6be0713f88305dbda397","README.md":"362a2156f7645528061b6e8487a2eb0f32f1693012ed82ee57afa05c039bba0d","build.rs":"0cc6e2cb919ddbff59cf1d810283939f97a59f0037540c0f2ee3453237635ff8","src/fallback.rs":"5c6379a90735e27abcc40253b223158c6b1e5784f3850bc423335363e87ef038","src/lib.rs":"3c257d875da825fb74522d74459d4ac697ab3c998f58af57aa17ae9dfaa19308","src/strnom.rs":"37f7791f73f123817ad5403af1d4e2a0714be27401729a2d451bc80b1f26bac9","src/wrapper.rs":"6e9aa48b55da1edd81a72552d6705e251ea5e77827a611bed5fa6a89ee9e3d59","tests/features.rs":"a86deb8644992a4eb64d9fd493eff16f9cf9c5cb6ade3a634ce0c990cf87d559","tests/marker.rs":"c2652e3ae1dfcb94d2e6313b29712c5dcbd0fe62026913e67bb7cebd7560aade","tests/test.rs":"8c427be9cba1fa8d4a16647e53e3545e5863e29e2c0b311c93c9dd1399abf6a1"},"package":"afdc77cc74ec70ed262262942ebb7dac3d479e9e5cfa2da1841c0806f6cdabcc"}
+\ No newline at end of file
+diff --git a/third_party/rust/proc-macro2/Cargo.toml b/third_party/rust/proc-macro2/Cargo.toml
+--- a/third_party/rust/proc-macro2/Cargo.toml
++++ b/third_party/rust/proc-macro2/Cargo.toml
+@@ -3,7 +3,7 @@
+ # When uploading crates to the registry Cargo will automatically
+ # "normalize" Cargo.toml files for maximal compatibility
+ # with all versions of Cargo and also rewrite `path` dependencies
+-# to registry (e.g. crates.io) dependencies
++# to registry (e.g., crates.io) dependencies
+ #
+ # If you believe there's an error in this file please file an
+ # issue against the rust-lang/cargo repository. If you're
+@@ -11,24 +11,28 @@
+ # will likely look very different (and much more reasonable)
+
+ [package]
++edition = "2018"
+ name = "proc-macro2"
+-version = "0.4.27"
++version = "1.0.4"
+ authors = ["Alex Crichton <alex@alexcrichton.com>"]
+-build = "build.rs"
+ description = "A stable implementation of the upcoming new `proc_macro` API. Comes with an\noption, off by default, to also reimplement itself in terms of the upstream\nunstable API.\n"
+ homepage = "https://github.com/alexcrichton/proc-macro2"
+ documentation = "https://docs.rs/proc-macro2"
+ readme = "README.md"
+ keywords = ["macros"]
+-license = "MIT/Apache-2.0"
++license = "MIT OR Apache-2.0"
+ repository = "https://github.com/alexcrichton/proc-macro2"
+ [package.metadata.docs.rs]
+ rustc-args = ["--cfg", "procmacro2_semver_exempt"]
+ rustdoc-args = ["--cfg", "procmacro2_semver_exempt"]
++
++[lib]
++name = "proc_macro2"
+ [dependencies.unicode-xid]
+-version = "0.1"
++version = "0.2"
+ [dev-dependencies.quote]
+-version = "0.6"
++version = "1.0"
++default_features = false
+
+ [features]
+ default = ["proc-macro"]
+diff --git a/third_party/rust/proc-macro2/README.md b/third_party/rust/proc-macro2/README.md
+--- a/third_party/rust/proc-macro2/README.md
++++ b/third_party/rust/proc-macro2/README.md
+@@ -5,7 +5,7 @@
+ [![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/proc-macro2)
+
+ A wrapper around the procedural macro API of the compiler's `proc_macro` crate.
+-This library serves three purposes:
++This library serves two purposes:
+
+ - **Bring proc-macro-like functionality to other contexts like build.rs and
+ main.rs.** Types from `proc_macro` are entirely specific to procedural macros
+@@ -21,13 +21,6 @@
+ unit test. In order for helper libraries or components of a macro to be
+ testable in isolation, they must be implemented using `proc_macro2`.
+
+-- **Provide the latest and greatest APIs across all compiler versions.**
+- Procedural macros were first introduced to Rust in 1.15.0 with an extremely
+- minimal interface. Since then, many improvements have landed to make macros
+- more flexible and easier to write. This library tracks the procedural macro
+- API of the most recent stable compiler but employs a polyfill to provide that
+- API consistently across any compiler since 1.15.0.
+-
+ [syn]: https://github.com/dtolnay/syn
+ [quote]: https://github.com/dtolnay/quote
+
+@@ -35,7 +28,7 @@
+
+ ```toml
+ [dependencies]
+-proc-macro2 = "0.4"
++proc-macro2 = "1.0"
+ ```
+
+ The skeleton of a typical procedural macro typically looks like this:
+@@ -58,7 +51,7 @@
+ If parsing with [Syn], you'll use [`parse_macro_input!`] instead to propagate
+ parse errors correctly back to the compiler when parsing fails.
+
+-[`parse_macro_input!`]: https://docs.rs/syn/0.15/syn/macro.parse_macro_input.html
++[`parse_macro_input!`]: https://docs.rs/syn/1.0/syn/macro.parse_macro_input.html
+
+ ## Unstable features
+
+@@ -67,10 +60,10 @@
+ proc-macro2 by default.
+
+ To opt into the additional APIs available in the most recent nightly compiler,
+-the `procmacro2_semver_exempt` config flag must be passed to rustc. As usual, we
+-will polyfill those nightly-only APIs all the way back to Rust 1.15.0. As these
+-are unstable APIs that track the nightly compiler, minor versions of proc-macro2
+-may make breaking changes to them at any time.
++the `procmacro2_semver_exempt` config flag must be passed to rustc. We will
++polyfill those nightly-only APIs back to Rust 1.31.0. As these are unstable APIs
++that track the nightly compiler, minor versions of proc-macro2 may make breaking
++changes to them at any time.
+
+ ```
+ RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build
+@@ -82,19 +75,19 @@
+
+ Semver exempt methods are marked as such in the proc-macro2 documentation.
+
+-# License
++<br>
+
+-This project is licensed under either of
++#### License
+
+- * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
+- http://www.apache.org/licenses/LICENSE-2.0)
+- * MIT license ([LICENSE-MIT](LICENSE-MIT) or
+- http://opensource.org/licenses/MIT)
++<sup>
++Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
++2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
++</sup>
+
+-at your option.
+-
+-### Contribution
++<br>
+
++<sub>
+ Unless you explicitly state otherwise, any contribution intentionally submitted
+-for inclusion in Serde by you, as defined in the Apache-2.0 license, shall be
+-dual licensed as above, without any additional terms or conditions.
++for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
++be dual licensed as above, without any additional terms or conditions.
++</sub>
+diff --git a/third_party/rust/proc-macro2/build.rs b/third_party/rust/proc-macro2/build.rs
+--- a/third_party/rust/proc-macro2/build.rs
++++ b/third_party/rust/proc-macro2/build.rs
+@@ -1,9 +1,5 @@
+ // rustc-cfg emitted by the build script:
+ //
+-// "u128"
+-// Include u128 and i128 constructors for proc_macro2::Literal. Enabled on
+-// any compiler 1.26+.
+-//
+ // "use_proc_macro"
+ // Link to extern crate proc_macro. Available on any compiler and any target
+ // except wasm32. Requires "proc-macro" Cargo cfg to be enabled (default is
+@@ -18,15 +14,11 @@
+ // procmacro2_semver_exempt surface area is implemented by using the
+ // nightly-only proc_macro API.
+ //
+-// "slow_extend"
+-// Fallback when `impl Extend for TokenStream` is not available. These impls
+-// were added one version later than the rest of the proc_macro token API.
+-// Enabled on rustc 1.29 only.
+-//
+-// "nightly"
+-// Enable the Span::unwrap method. This is to support proc_macro_span and
+-// proc_macro_diagnostic use on the nightly channel without requiring the
+-// semver exemption opt-in. Enabled when building with nightly.
++// "proc_macro_span"
++// Enable non-dummy behavior of Span::start and Span::end methods which
++// requires an unstable compiler feature. Enabled when building with
++// nightly, unless `-Z allow-feature` in RUSTFLAGS disallows unstable
++// features.
+ //
+ // "super_unstable"
+ // Implement the semver exempt API in terms of the nightly-only proc_macro
+@@ -39,21 +31,20 @@
+ // location inside spans is a performance hit.
+
+ use std::env;
+-use std::process::Command;
++use std::process::{self, Command};
+ use std::str;
+
+ fn main() {
+ println!("cargo:rerun-if-changed=build.rs");
+
+- let target = env::var("TARGET").unwrap();
+-
+ let version = match rustc_version() {
+ Some(version) => version,
+ None => return,
+ };
+
+- if version.minor >= 26 {
+- println!("cargo:rustc-cfg=u128");
++ if version.minor < 31 {
++ eprintln!("Minimum supported rustc version is 1.31");
++ process::exit(1);
+ }
+
+ let semver_exempt = cfg!(procmacro2_semver_exempt);
+@@ -66,23 +57,19 @@
+ println!("cargo:rustc-cfg=span_locations");
+ }
+
++ let target = env::var("TARGET").unwrap();
+ if !enable_use_proc_macro(&target) {
+ return;
+ }
+
+ println!("cargo:rustc-cfg=use_proc_macro");
+
+- // Rust 1.29 stabilized the necessary APIs in the `proc_macro` crate
+- if version.nightly || version.minor >= 29 && !semver_exempt {
++ if version.nightly || !semver_exempt {
+ println!("cargo:rustc-cfg=wrap_proc_macro");
+ }
+
+- if version.minor == 29 {
+- println!("cargo:rustc-cfg=slow_extend");
+- }
+-
+- if version.nightly {
+- println!("cargo:rustc-cfg=nightly");
++ if version.nightly && feature_allowed("proc_macro_span") {
++ println!("cargo:rustc-cfg=proc_macro_span");
+ }
+
+ if semver_exempt && version.nightly {
+@@ -106,28 +93,37 @@
+ }
+
+ fn rustc_version() -> Option<RustcVersion> {
+- macro_rules! otry {
+- ($e:expr) => {
+- match $e {
+- Some(e) => e,
+- None => return None,
+- }
+- };
+- }
+-
+- let rustc = otry!(env::var_os("RUSTC"));
+- let output = otry!(Command::new(rustc).arg("--version").output().ok());
+- let version = otry!(str::from_utf8(&output.stdout).ok());
+- let nightly = version.contains("nightly");
++ let rustc = env::var_os("RUSTC")?;
++ let output = Command::new(rustc).arg("--version").output().ok()?;
++ let version = str::from_utf8(&output.stdout).ok()?;
++ let nightly = version.contains("nightly") || version.contains("dev");
+ let mut pieces = version.split('.');
+ if pieces.next() != Some("rustc 1") {
+ return None;
+ }
+- let minor = otry!(pieces.next());
+- let minor = otry!(minor.parse().ok());
++ let minor = pieces.next()?.parse().ok()?;
++ Some(RustcVersion { minor, nightly })
++}
++
++fn feature_allowed(feature: &str) -> bool {
++ // Recognized formats:
++ //
++ // -Z allow-features=feature1,feature2
++ //
++ // -Zallow-features=feature1,feature2
+
+- Some(RustcVersion {
+- minor: minor,
+- nightly: nightly,
+- })
++ if let Some(rustflags) = env::var_os("RUSTFLAGS") {
++ for mut flag in rustflags.to_string_lossy().split(' ') {
++ if flag.starts_with("-Z") {
++ flag = &flag["-Z".len()..];
++ }
++ if flag.starts_with("allow-features=") {
++ flag = &flag["allow-features=".len()..];
++ return flag.split(',').any(|allowed| allowed == feature);
++ }
++ }
++ }
++
++ // No allow-features= flag, allowed by default.
++ true
+ }
+diff --git a/third_party/rust/proc-macro2/src/fallback.rs b/third_party/rust/proc-macro2/src/fallback.rs
+--- a/third_party/rust/proc-macro2/src/fallback.rs
++++ b/third_party/rust/proc-macro2/src/fallback.rs
+@@ -1,20 +1,20 @@
+ #[cfg(span_locations)]
+ use std::cell::RefCell;
+-#[cfg(procmacro2_semver_exempt)]
++#[cfg(span_locations)]
+ use std::cmp;
+ use std::fmt;
+ use std::iter;
++use std::ops::RangeBounds;
+ #[cfg(procmacro2_semver_exempt)]
+ use std::path::Path;
+ use std::path::PathBuf;
+ use std::str::FromStr;
+ use std::vec;
+
+-use strnom::{block_comment, skip_whitespace, whitespace, word_break, Cursor, PResult};
++use crate::strnom::{block_comment, skip_whitespace, whitespace, word_break, Cursor, PResult};
++use crate::{Delimiter, Punct, Spacing, TokenTree};
+ use unicode_xid::UnicodeXID;
+
+-use {Delimiter, Punct, Spacing, TokenTree};
+-
+ #[derive(Clone)]
+ pub struct TokenStream {
+ inner: Vec<TokenTree>,
+@@ -35,8 +35,8 @@
+
+ #[cfg(span_locations)]
+ fn get_cursor(src: &str) -> Cursor {
+- // Create a dummy file & add it to the codemap
+- CODEMAP.with(|cm| {
++ // Create a dummy file & add it to the source map
++ SOURCE_MAP.with(|cm| {
+ let mut cm = cm.borrow_mut();
+ let name = format!("<parsed string {}>", cm.files.len());
+ let span = cm.add_file(&name, src);
+@@ -56,7 +56,7 @@
+ type Err = LexError;
+
+ fn from_str(src: &str) -> Result<TokenStream, LexError> {
+- // Create a dummy file & add it to the codemap
++ // Create a dummy file & add it to the source map
+ let cursor = get_cursor(src);
+
+ match token_stream(cursor) {
+@@ -118,8 +118,8 @@
+ }
+
+ #[cfg(use_proc_macro)]
+-impl From<::proc_macro::TokenStream> for TokenStream {
+- fn from(inner: ::proc_macro::TokenStream) -> TokenStream {
++impl From<proc_macro::TokenStream> for TokenStream {
++ fn from(inner: proc_macro::TokenStream) -> TokenStream {
+ inner
+ .to_string()
+ .parse()
+@@ -128,8 +128,8 @@
+ }
+
+ #[cfg(use_proc_macro)]
+-impl From<TokenStream> for ::proc_macro::TokenStream {
+- fn from(inner: TokenStream) -> ::proc_macro::TokenStream {
++impl From<TokenStream> for proc_macro::TokenStream {
++ fn from(inner: TokenStream) -> proc_macro::TokenStream {
+ inner
+ .to_string()
+ .parse()
+@@ -225,7 +225,7 @@
+
+ #[cfg(span_locations)]
+ thread_local! {
+- static CODEMAP: RefCell<Codemap> = RefCell::new(Codemap {
++ static SOURCE_MAP: RefCell<SourceMap> = RefCell::new(SourceMap {
+ // NOTE: We start with a single dummy file which all call_site() and
+ // def_site() spans reference.
+ files: vec![{
+@@ -295,12 +295,12 @@
+ }
+
+ #[cfg(span_locations)]
+-struct Codemap {
++struct SourceMap {
+ files: Vec<FileInfo>,
+ }
+
+ #[cfg(span_locations)]
+-impl Codemap {
++impl SourceMap {
+ fn next_start_pos(&self) -> u32 {
+ // Add 1 so there's always space between files.
+ //
+@@ -314,22 +314,19 @@
+ let lo = self.next_start_pos();
+ // XXX(nika): Shouild we bother doing a checked cast or checked add here?
+ let span = Span {
+- lo: lo,
++ lo,
+ hi: lo + (src.len() as u32),
+ };
+
+ #[cfg(procmacro2_semver_exempt)]
+ self.files.push(FileInfo {
+ name: name.to_owned(),
+- span: span,
+- lines: lines,
++ span,
++ lines,
+ });
+
+ #[cfg(not(procmacro2_semver_exempt))]
+- self.files.push(FileInfo {
+- span: span,
+- lines: lines,
+- });
++ self.files.push(FileInfo { span, lines });
+ let _ = name;
+
+ span
+@@ -384,7 +381,7 @@
+
+ #[cfg(procmacro2_semver_exempt)]
+ pub fn source_file(&self) -> SourceFile {
+- CODEMAP.with(|cm| {
++ SOURCE_MAP.with(|cm| {
+ let cm = cm.borrow();
+ let fi = cm.fileinfo(*self);
+ SourceFile {
+@@ -395,7 +392,7 @@
+
+ #[cfg(span_locations)]
+ pub fn start(&self) -> LineColumn {
+- CODEMAP.with(|cm| {
++ SOURCE_MAP.with(|cm| {
+ let cm = cm.borrow();
+ let fi = cm.fileinfo(*self);
+ fi.offset_line_column(self.lo as usize)
+@@ -404,16 +401,21 @@
+
+ #[cfg(span_locations)]
+ pub fn end(&self) -> LineColumn {
+- CODEMAP.with(|cm| {
++ SOURCE_MAP.with(|cm| {
+ let cm = cm.borrow();
+ let fi = cm.fileinfo(*self);
+ fi.offset_line_column(self.hi as usize)
+ })
+ }
+
+- #[cfg(procmacro2_semver_exempt)]
++ #[cfg(not(span_locations))]
++ pub fn join(&self, _other: Span) -> Option<Span> {
++ Some(Span {})
++ }
++
++ #[cfg(span_locations)]
+ pub fn join(&self, other: Span) -> Option<Span> {
+- CODEMAP.with(|cm| {
++ SOURCE_MAP.with(|cm| {
+ let cm = cm.borrow();
+ // If `other` is not within the same FileInfo as us, return None.
+ if !cm.fileinfo(*self).span_within(other) {
+@@ -453,8 +455,8 @@
+ impl Group {
+ pub fn new(delimiter: Delimiter, stream: TokenStream) -> Group {
+ Group {
+- delimiter: delimiter,
+- stream: stream,
++ delimiter,
++ stream,
+ span: Span::call_site(),
+ }
+ }
+@@ -471,12 +473,10 @@
+ self.span
+ }
+
+- #[cfg(procmacro2_semver_exempt)]
+ pub fn span_open(&self) -> Span {
+ self.span
+ }
+
+- #[cfg(procmacro2_semver_exempt)]
+ pub fn span_close(&self) -> Span {
+ self.span
+ }
+@@ -523,12 +523,12 @@
+
+ impl Ident {
+ fn _new(string: &str, raw: bool, span: Span) -> Ident {
+- validate_term(string);
++ validate_ident(string);
+
+ Ident {
+ sym: string.to_owned(),
+- span: span,
+- raw: raw,
++ span,
++ raw,
+ }
+ }
+
+@@ -566,7 +566,7 @@
+ || (c > '\x7f' && UnicodeXID::is_xid_continue(c))
+ }
+
+-fn validate_term(string: &str) {
++fn validate_ident(string: &str) {
+ let validate = string;
+ if validate.is_empty() {
+ panic!("Ident is not allowed to be empty; use Option<Ident>");
+@@ -671,7 +671,7 @@
+ impl Literal {
+ fn _new(text: String) -> Literal {
+ Literal {
+- text: text,
++ text,
+ span: Span::call_site(),
+ }
+ }
+@@ -681,42 +681,34 @@
+ u16_suffixed => u16,
+ u32_suffixed => u32,
+ u64_suffixed => u64,
++ u128_suffixed => u128,
+ usize_suffixed => usize,
+ i8_suffixed => i8,
+ i16_suffixed => i16,
+ i32_suffixed => i32,
+ i64_suffixed => i64,
++ i128_suffixed => i128,
+ isize_suffixed => isize,
+
+ f32_suffixed => f32,
+ f64_suffixed => f64,
+ }
+
+- #[cfg(u128)]
+- suffixed_numbers! {
+- u128_suffixed => u128,
+- i128_suffixed => i128,
+- }
+-
+ unsuffixed_numbers! {
+ u8_unsuffixed => u8,
+ u16_unsuffixed => u16,
+ u32_unsuffixed => u32,
+ u64_unsuffixed => u64,
++ u128_unsuffixed => u128,
+ usize_unsuffixed => usize,
+ i8_unsuffixed => i8,
+ i16_unsuffixed => i16,
+ i32_unsuffixed => i32,
+ i64_unsuffixed => i64,
++ i128_unsuffixed => i128,
+ isize_unsuffixed => isize,
+ }
+
+- #[cfg(u128)]
+- unsuffixed_numbers! {
+- u128_unsuffixed => u128,
+- i128_unsuffixed => i128,
+- }
+-
+ pub fn f32_unsuffixed(f: f32) -> Literal {
+ let mut s = f.to_string();
+ if !s.contains(".") {
+@@ -734,17 +726,31 @@
+ }
+
+ pub fn string(t: &str) -> Literal {
+- let mut s = t
+- .chars()
+- .flat_map(|c| c.escape_default())
+- .collect::<String>();
+- s.push('"');
+- s.insert(0, '"');
+- Literal::_new(s)
++ let mut text = String::with_capacity(t.len() + 2);
++ text.push('"');
++ for c in t.chars() {
++ if c == '\'' {
++ // escape_default turns this into "\'" which is unnecessary.
++ text.push(c);
++ } else {
++ text.extend(c.escape_default());
++ }
++ }
++ text.push('"');
++ Literal::_new(text)
+ }
+
+ pub fn character(t: char) -> Literal {
+- Literal::_new(format!("'{}'", t.escape_default().collect::<String>()))
++ let mut text = String::new();
++ text.push('\'');
++ if t == '"' {
++ // escape_default turns this into '\"' which is unnecessary.
++ text.push(t);
++ } else {
++ text.extend(t.escape_default());
++ }
++ text.push('\'');
++ Literal::_new(text)
+ }
+
+ pub fn byte_string(bytes: &[u8]) -> Literal {
+@@ -757,7 +763,7 @@
+ b'\r' => escaped.push_str(r"\r"),
+ b'"' => escaped.push_str("\\\""),
+ b'\\' => escaped.push_str("\\\\"),
+- b'\x20'...b'\x7E' => escaped.push(*b as char),
++ b'\x20'..=b'\x7E' => escaped.push(*b as char),
+ _ => escaped.push_str(&format!("\\x{:02X}", b)),
+ }
+ }
+@@ -772,6 +778,10 @@
+ pub fn set_span(&mut self, span: Span) {
+ self.span = span;
+ }
++
++ pub fn subspan<R: RangeBounds<usize>>(&self, _range: R) -> Option<Span> {
++ None
++ }
+ }
+
+ impl fmt::Display for Literal {
+@@ -817,21 +827,21 @@
+ fn spanned<'a, T>(
+ input: Cursor<'a>,
+ f: fn(Cursor<'a>) -> PResult<'a, T>,
+-) -> PResult<'a, (T, ::Span)> {
++) -> PResult<'a, (T, crate::Span)> {
+ let (a, b) = f(skip_whitespace(input))?;
+- Ok((a, ((b, ::Span::_new_stable(Span::call_site())))))
++ Ok((a, ((b, crate::Span::_new_stable(Span::call_site())))))
+ }
+
+ #[cfg(span_locations)]
+ fn spanned<'a, T>(
+ input: Cursor<'a>,
+ f: fn(Cursor<'a>) -> PResult<'a, T>,
+-) -> PResult<'a, (T, ::Span)> {
++) -> PResult<'a, (T, crate::Span)> {
+ let input = skip_whitespace(input);
+ let lo = input.off;
+ let (a, b) = f(input)?;
+ let hi = a.off;
+- let span = ::Span::_new_stable(Span { lo: lo, hi: hi });
++ let span = crate::Span::_new_stable(Span { lo, hi });
+ Ok((a, (b, span)))
+ }
+
+@@ -842,9 +852,9 @@
+ }
+
+ named!(token_kind -> TokenTree, alt!(
+- map!(group, |g| TokenTree::Group(::Group::_new_stable(g)))
++ map!(group, |g| TokenTree::Group(crate::Group::_new_stable(g)))
+ |
+- map!(literal, |l| TokenTree::Literal(::Literal::_new_stable(l))) // must be before symbol
++ map!(literal, |l| TokenTree::Literal(crate::Literal::_new_stable(l))) // must be before symbol
+ |
+ map!(op, TokenTree::Punct)
+ |
+@@ -876,14 +886,27 @@
+ }
+
+ fn symbol(input: Cursor) -> PResult<TokenTree> {
+- let mut chars = input.char_indices();
++ let raw = input.starts_with("r#");
++ let rest = input.advance((raw as usize) << 1);
++
++ let (rest, sym) = symbol_not_raw(rest)?;
++
++ if !raw {
++ let ident = crate::Ident::new(sym, crate::Span::call_site());
++ return Ok((rest, ident.into()));
++ }
+
+- let raw = input.starts_with("r#");
+- if raw {
+- chars.next();
+- chars.next();
++ if sym == "_" {
++ return Err(LexError);
+ }
+
++ let ident = crate::Ident::_new_raw(sym, crate::Span::call_site());
++ Ok((rest, ident.into()))
++}
++
++fn symbol_not_raw(input: Cursor) -> PResult<&str> {
++ let mut chars = input.char_indices();
++
+ match chars.next() {
+ Some((_, ch)) if is_ident_start(ch) => {}
+ _ => return Err(LexError),
+@@ -897,17 +920,7 @@
+ }
+ }
+
+- let a = &input.rest[..end];
+- if a == "r#_" {
+- Err(LexError)
+- } else {
+- let ident = if raw {
+- ::Ident::_new_raw(&a[2..], ::Span::call_site())
+- } else {
+- ::Ident::new(a, ::Span::call_site())
+- };
+- Ok((input.advance(end), ident.into()))
+- }
++ Ok((input.advance(end), &input.rest[..end]))
+ }
+
+ fn literal(input: Cursor) -> PResult<Literal> {
+@@ -947,10 +960,12 @@
+ ) => { |_| () }
+ ));
+
+-named!(quoted_string -> (), delimited!(
+- punct!("\""),
+- cooked_string,
+- tag!("\"")
++named!(quoted_string -> (), do_parse!(
++ punct!("\"") >>
++ cooked_string >>
++ tag!("\"") >>
++ option!(symbol_not_raw) >>
++ (())
+ ));
+
+ fn cooked_string(input: Cursor) -> PResult<()> {
+@@ -1159,8 +1174,8 @@
+ where
+ I: Iterator<Item = (usize, char)>,
+ {
+- next_ch!(chars @ '0'...'7');
+- next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F');
++ next_ch!(chars @ '0'..='7');
++ next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F');
+ true
+ }
+
+@@ -1168,8 +1183,8 @@
+ where
+ I: Iterator<Item = (usize, u8)>,
+ {
+- next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F');
+- next_ch!(chars @ b'0'...b'9' | b'a'...b'f' | b'A'...b'F');
++ next_ch!(chars @ b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F');
++ next_ch!(chars @ b'0'..=b'9' | b'a'..=b'f' | b'A'..=b'F');
+ true
+ }
+
+@@ -1178,9 +1193,9 @@
+ I: Iterator<Item = (usize, char)>,
+ {
+ next_ch!(chars @ '{');
+- next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F');
++ next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F');
+ loop {
+- let c = next_ch!(chars @ '0'...'9' | 'a'...'f' | 'A'...'F' | '_' | '}');
++ let c = next_ch!(chars @ '0'..='9' | 'a'..='f' | 'A'..='F' | '_' | '}');
+ if c == '}' {
+ return true;
+ }
+@@ -1188,10 +1203,10 @@
+ }
+
+ fn float(input: Cursor) -> PResult<()> {
+- let (rest, ()) = float_digits(input)?;
+- for suffix in &["f32", "f64"] {
+- if rest.starts_with(suffix) {
+- return word_break(rest.advance(suffix.len()));
++ let (mut rest, ()) = float_digits(input)?;
++ if let Some(ch) = rest.chars().next() {
++ if is_ident_start(ch) {
++ rest = symbol_not_raw(rest)?.0;
+ }
+ }
+ word_break(rest)
+@@ -1209,7 +1224,7 @@
+ let mut has_exp = false;
+ while let Some(&ch) = chars.peek() {
+ match ch {
+- '0'...'9' | '_' => {
++ '0'..='9' | '_' => {
+ chars.next();
+ len += 1;
+ }
+@@ -1220,7 +1235,7 @@
+ chars.next();
+ if chars
+ .peek()
+- .map(|&ch| ch == '.' || UnicodeXID::is_xid_start(ch))
++ .map(|&ch| ch == '.' || is_ident_start(ch))
+ .unwrap_or(false)
+ {
+ return Err(LexError);
+@@ -1254,7 +1269,7 @@
+ chars.next();
+ len += 1;
+ }
+- '0'...'9' => {
++ '0'..='9' => {
+ chars.next();
+ len += 1;
+ has_exp_value = true;
+@@ -1275,12 +1290,10 @@
+ }
+
+ fn int(input: Cursor) -> PResult<()> {
+- let (rest, ()) = digits(input)?;
+- for suffix in &[
+- "isize", "i8", "i16", "i32", "i64", "i128", "usize", "u8", "u16", "u32", "u64", "u128",
+- ] {
+- if rest.starts_with(suffix) {
+- return word_break(rest.advance(suffix.len()));
++ let (mut rest, ()) = digits(input)?;
++ if let Some(ch) = rest.chars().next() {
++ if is_ident_start(ch) {
++ rest = symbol_not_raw(rest)?.0;
+ }
+ }
+ word_break(rest)
+@@ -1304,9 +1317,9 @@
+ let mut empty = true;
+ for b in input.bytes() {
+ let digit = match b {
+- b'0'...b'9' => (b - b'0') as u64,
+- b'a'...b'f' => 10 + (b - b'a') as u64,
+- b'A'...b'F' => 10 + (b - b'A') as u64,
++ b'0'..=b'9' => (b - b'0') as u64,
++ b'a'..=b'f' => 10 + (b - b'a') as u64,
++ b'A'..=b'F' => 10 + (b - b'A') as u64,
+ b'_' => {
+ if empty && base == 10 {
+ return Err(LexError);
+@@ -1376,15 +1389,15 @@
+ trees.push(Punct::new('!', Spacing::Alone).into());
+ }
+ let mut stream = vec![
+- TokenTree::Ident(::Ident::new("doc", span)),
++ TokenTree::Ident(crate::Ident::new("doc", span)),
+ TokenTree::Punct(Punct::new('=', Spacing::Alone)),
+- TokenTree::Literal(::Literal::string(comment)),
++ TokenTree::Literal(crate::Literal::string(comment)),
+ ];
+ for tt in stream.iter_mut() {
+ tt.set_span(span);
+ }
+ let group = Group::new(Delimiter::Bracket, stream.into_iter().collect());
+- trees.push(::Group::_new_stable(group).into());
++ trees.push(crate::Group::_new_stable(group).into());
+ for tt in trees.iter_mut() {
+ tt.set_span(span);
+ }
+diff --git a/third_party/rust/proc-macro2/src/lib.rs b/third_party/rust/proc-macro2/src/lib.rs
+--- a/third_party/rust/proc-macro2/src/lib.rs
++++ b/third_party/rust/proc-macro2/src/lib.rs
+@@ -1,5 +1,5 @@
+ //! A wrapper around the procedural macro API of the compiler's [`proc_macro`]
+-//! crate. This library serves three purposes:
++//! crate. This library serves two purposes:
+ //!
+ //! [`proc_macro`]: https://doc.rust-lang.org/proc_macro/
+ //!
+@@ -18,14 +18,6 @@
+ //! a macro to be testable in isolation, they must be implemented using
+ //! `proc_macro2`.
+ //!
+-//! - **Provide the latest and greatest APIs across all compiler versions.**
+-//! Procedural macros were first introduced to Rust in 1.15.0 with an
+-//! extremely minimal interface. Since then, many improvements have landed to
+-//! make macros more flexible and easier to write. This library tracks the
+-//! procedural macro API of the most recent stable compiler but employs a
+-//! polyfill to provide that API consistently across any compiler since
+-//! 1.15.0.
+-//!
+ //! [syn]: https://github.com/dtolnay/syn
+ //! [quote]: https://github.com/dtolnay/quote
+ //!
+@@ -33,12 +25,13 @@
+ //!
+ //! The skeleton of a typical procedural macro typically looks like this:
+ //!
+-//! ```edition2018
++//! ```
+ //! extern crate proc_macro;
+ //!
+ //! # const IGNORE: &str = stringify! {
+ //! #[proc_macro_derive(MyDerive)]
+ //! # };
++//! # #[cfg(wrap_proc_macro)]
+ //! pub fn my_derive(input: proc_macro::TokenStream) -> proc_macro::TokenStream {
+ //! let input = proc_macro2::TokenStream::from(input);
+ //!
+@@ -54,7 +47,7 @@
+ //! If parsing with [Syn], you'll use [`parse_macro_input!`] instead to
+ //! propagate parse errors correctly back to the compiler when parsing fails.
+ //!
+-//! [`parse_macro_input!`]: https://docs.rs/syn/0.15/syn/macro.parse_macro_input.html
++//! [`parse_macro_input!`]: https://docs.rs/syn/1.0/syn/macro.parse_macro_input.html
+ //!
+ //! # Unstable features
+ //!
+@@ -64,9 +57,9 @@
+ //!
+ //! To opt into the additional APIs available in the most recent nightly
+ //! compiler, the `procmacro2_semver_exempt` config flag must be passed to
+-//! rustc. As usual, we will polyfill those nightly-only APIs all the way back
+-//! to Rust 1.15.0. As these are unstable APIs that track the nightly compiler,
+-//! minor versions of proc-macro2 may make breaking changes to them at any time.
++//! rustc. We will polyfill those nightly-only APIs back to Rust 1.31.0. As
++//! these are unstable APIs that track the nightly compiler, minor versions of
++//! proc-macro2 may make breaking changes to them at any time.
+ //!
+ //! ```sh
+ //! RUSTFLAGS='--cfg procmacro2_semver_exempt' cargo build
+@@ -77,21 +70,27 @@
+ //! as a reminder that you are outside of the normal semver guarantees.
+ //!
+ //! Semver exempt methods are marked as such in the proc-macro2 documentation.
++//!
++//! # Thread-Safety
++//!
++//! Most types in this crate are `!Sync` because the underlying compiler
++//! types make use of thread-local memory, meaning they cannot be accessed from
++//! a different thread.
+
+ // Proc-macro2 types in rustdoc of other crates get linked to here.
+-#![doc(html_root_url = "https://docs.rs/proc-macro2/0.4.27")]
+-#![cfg_attr(nightly, feature(proc_macro_span))]
++#![doc(html_root_url = "https://docs.rs/proc-macro2/1.0.4")]
++#![cfg_attr(any(proc_macro_span, super_unstable), feature(proc_macro_span))]
+ #![cfg_attr(super_unstable, feature(proc_macro_raw_ident, proc_macro_def_site))]
+
+ #[cfg(use_proc_macro)]
+ extern crate proc_macro;
+-extern crate unicode_xid;
+
+ use std::cmp::Ordering;
+ use std::fmt;
+ use std::hash::{Hash, Hasher};
+ use std::iter::FromIterator;
+ use std::marker;
++use std::ops::RangeBounds;
+ #[cfg(procmacro2_semver_exempt)]
+ use std::path::PathBuf;
+ use std::rc::Rc;
+@@ -102,7 +101,7 @@
+ mod fallback;
+
+ #[cfg(not(wrap_proc_macro))]
+-use fallback as imp;
++use crate::fallback as imp;
+ #[path = "wrapper.rs"]
+ #[cfg(wrap_proc_macro)]
+ mod imp;
+@@ -129,7 +128,7 @@
+ impl TokenStream {
+ fn _new(inner: imp::TokenStream) -> TokenStream {
+ TokenStream {
+- inner: inner,
++ inner,
+ _marker: marker::PhantomData,
+ }
+ }
+@@ -146,11 +145,6 @@
+ TokenStream::_new(imp::TokenStream::new())
+ }
+
+- #[deprecated(since = "0.4.4", note = "please use TokenStream::new")]
+- pub fn empty() -> TokenStream {
+- TokenStream::new()
+- }
+-
+ /// Checks if this `TokenStream` is empty.
+ pub fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+@@ -199,6 +193,12 @@
+ }
+ }
+
++impl From<TokenTree> for TokenStream {
++ fn from(token: TokenTree) -> Self {
++ TokenStream::_new(imp::TokenStream::from(token))
++ }
++}
++
+ impl Extend<TokenTree> for TokenStream {
+ fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) {
+ self.inner.extend(streams)
+@@ -261,7 +261,7 @@
+ impl SourceFile {
+ fn _new(inner: imp::SourceFile) -> Self {
+ SourceFile {
+- inner: inner,
++ inner,
+ _marker: marker::PhantomData,
+ }
+ }
+@@ -301,6 +301,7 @@
+ ///
+ /// This type is semver exempt and not exposed by default.
+ #[cfg(span_locations)]
++#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+ pub struct LineColumn {
+ /// The 1-indexed line in the source file on which the span starts or ends
+ /// (inclusive).
+@@ -320,7 +321,7 @@
+ impl Span {
+ fn _new(inner: imp::Span) -> Span {
+ Span {
+- inner: inner,
++ inner,
+ _marker: marker::PhantomData,
+ }
+ }
+@@ -403,10 +404,7 @@
+ #[cfg(span_locations)]
+ pub fn start(&self) -> LineColumn {
+ let imp::LineColumn { line, column } = self.inner.start();
+- LineColumn {
+- line: line,
+- column: column,
+- }
++ LineColumn { line, column }
+ }
+
+ /// Get the ending line/column in the source file for this span.
+@@ -415,23 +413,23 @@
+ #[cfg(span_locations)]
+ pub fn end(&self) -> LineColumn {
+ let imp::LineColumn { line, column } = self.inner.end();
+- LineColumn {
+- line: line,
+- column: column,
+- }
++ LineColumn { line, column }
+ }
+
+ /// Create a new span encompassing `self` and `other`.
+ ///
+ /// Returns `None` if `self` and `other` are from different files.
+ ///
+- /// This method is semver exempt and not exposed by default.
+- #[cfg(procmacro2_semver_exempt)]
++ /// Warning: the underlying [`proc_macro::Span::join`] method is
++ /// nightly-only. When called from within a procedural macro not using a
++ /// nightly compiler, this method will always return `None`.
++ ///
++ /// [`proc_macro::Span::join`]: https://doc.rust-lang.org/proc_macro/struct.Span.html#method.join
+ pub fn join(&self, other: Span) -> Option<Span> {
+ self.inner.join(other.inner).map(Span::_new)
+ }
+
+- /// Compares to spans to see if they're equal.
++ /// Compares two spans to see if they're equal.
+ ///
+ /// This method is semver exempt and not exposed by default.
+ #[cfg(procmacro2_semver_exempt)]
+@@ -575,7 +573,7 @@
+
+ impl Group {
+ fn _new(inner: imp::Group) -> Self {
+- Group { inner: inner }
++ Group { inner }
+ }
+
+ fn _new_stable(inner: fallback::Group) -> Self {
+@@ -625,7 +623,6 @@
+ /// pub fn span_open(&self) -> Span {
+ /// ^
+ /// ```
+- #[cfg(procmacro2_semver_exempt)]
+ pub fn span_open(&self) -> Span {
+ Span::_new(self.inner.span_open())
+ }
+@@ -636,7 +633,6 @@
+ /// pub fn span_close(&self) -> Span {
+ /// ^
+ /// ```
+- #[cfg(procmacro2_semver_exempt)]
+ pub fn span_close(&self) -> Span {
+ Span::_new(self.inner.span_close())
+ }
+@@ -684,7 +680,7 @@
+ pub enum Spacing {
+ /// E.g. `+` is `Alone` in `+ =`, `+ident` or `+()`.
+ Alone,
+- /// E.g. `+` is `Joint` in `+=` or `'#`.
++ /// E.g. `+` is `Joint` in `+=` or `'` is `Joint` in `'#`.
+ ///
+ /// Additionally, single quote `'` can join with identifiers to form
+ /// lifetimes `'ident`.
+@@ -701,8 +697,8 @@
+ /// which can be further configured with the `set_span` method below.
+ pub fn new(op: char, spacing: Spacing) -> Punct {
+ Punct {
+- op: op,
+- spacing: spacing,
++ op,
++ spacing,
+ span: Span::call_site(),
+ }
+ }
+@@ -764,7 +760,7 @@
+ /// Rust keywords. Use `input.call(Ident::parse_any)` when parsing to match the
+ /// behaviour of `Ident::new`.
+ ///
+-/// [`Parse`]: https://docs.rs/syn/0.15/syn/parse/trait.Parse.html
++/// [`Parse`]: https://docs.rs/syn/1.0/syn/parse/trait.Parse.html
+ ///
+ /// # Examples
+ ///
+@@ -772,7 +768,7 @@
+ /// A span must be provided explicitly which governs the name resolution
+ /// behavior of the resulting identifier.
+ ///
+-/// ```edition2018
++/// ```
+ /// use proc_macro2::{Ident, Span};
+ ///
+ /// fn main() {
+@@ -784,7 +780,7 @@
+ ///
+ /// An ident can be interpolated into a token stream using the `quote!` macro.
+ ///
+-/// ```edition2018
++/// ```
+ /// use proc_macro2::{Ident, Span};
+ /// use quote::quote;
+ ///
+@@ -803,7 +799,7 @@
+ /// A string representation of the ident is available through the `to_string()`
+ /// method.
+ ///
+-/// ```edition2018
++/// ```
+ /// # use proc_macro2::{Ident, Span};
+ /// #
+ /// # let ident = Ident::new("another_identifier", Span::call_site());
+@@ -823,7 +819,7 @@
+ impl Ident {
+ fn _new(inner: imp::Ident) -> Ident {
+ Ident {
+- inner: inner,
++ inner,
+ _marker: marker::PhantomData,
+ }
+ }
+@@ -853,7 +849,12 @@
+ /// # Panics
+ ///
+ /// Panics if the input string is neither a keyword nor a legal variable
+- /// name.
++ /// name. If you are not sure whether the string contains an identifier and
++ /// need to handle an error case, use
++ /// <a href="https://docs.rs/syn/1.0/syn/fn.parse_str.html"><code
++ /// style="padding-right:0;">syn::parse_str</code></a><code
++ /// style="padding-left:0;">::&lt;Ident&gt;</code>
++ /// rather than `Ident::new`.
+ pub fn new(string: &str, span: Span) -> Ident {
+ Ident::_new(imp::Ident::new(string, span.inner))
+ }
+@@ -986,7 +987,7 @@
+ impl Literal {
+ fn _new(inner: imp::Literal) -> Literal {
+ Literal {
+- inner: inner,
++ inner,
+ _marker: marker::PhantomData,
+ }
+ }
+@@ -1003,44 +1004,62 @@
+ u16_suffixed => u16,
+ u32_suffixed => u32,
+ u64_suffixed => u64,
++ u128_suffixed => u128,
+ usize_suffixed => usize,
+ i8_suffixed => i8,
+ i16_suffixed => i16,
+ i32_suffixed => i32,
+ i64_suffixed => i64,
++ i128_suffixed => i128,
+ isize_suffixed => isize,
+ }
+
+- #[cfg(u128)]
+- suffixed_int_literals! {
+- u128_suffixed => u128,
+- i128_suffixed => i128,
+- }
+-
+ unsuffixed_int_literals! {
+ u8_unsuffixed => u8,
+ u16_unsuffixed => u16,
+ u32_unsuffixed => u32,
+ u64_unsuffixed => u64,
++ u128_unsuffixed => u128,
+ usize_unsuffixed => usize,
+ i8_unsuffixed => i8,
+ i16_unsuffixed => i16,
+ i32_unsuffixed => i32,
+ i64_unsuffixed => i64,
++ i128_unsuffixed => i128,
+ isize_unsuffixed => isize,
+ }
+
+- #[cfg(u128)]
+- unsuffixed_int_literals! {
+- u128_unsuffixed => u128,
+- i128_unsuffixed => i128,
+- }
+-
++ /// Creates a new unsuffixed floating-point literal.
++ ///
++ /// This constructor is similar to those like `Literal::i8_unsuffixed` where
++ /// the float's value is emitted directly into the token but no suffix is
++ /// used, so it may be inferred to be a `f64` later in the compiler.
++ /// Literals created from negative numbers may not survive rountrips through
++ /// `TokenStream` or strings and may be broken into two tokens (`-` and
++ /// positive literal).
++ ///
++ /// # Panics
++ ///
++ /// This function requires that the specified float is finite, for example
++ /// if it is infinity or NaN this function will panic.
+ pub fn f64_unsuffixed(f: f64) -> Literal {
+ assert!(f.is_finite());
+ Literal::_new(imp::Literal::f64_unsuffixed(f))
+ }
+
++ /// Creates a new suffixed floating-point literal.
++ ///
++ /// This constructor will create a literal like `1.0f64` where the value
++ /// specified is the preceding part of the token and `f64` is the suffix of
++ /// the token. This token will always be inferred to be an `f64` in the
++ /// compiler. Literals created from negative numbers may not survive
++ /// rountrips through `TokenStream` or strings and may be broken into two
++ /// tokens (`-` and positive literal).
++ ///
++ /// # Panics
++ ///
++ /// This function requires that the specified float is finite, for example
++ /// if it is infinity or NaN this function will panic.
+ pub fn f64_suffixed(f: f64) -> Literal {
+ assert!(f.is_finite());
+ Literal::_new(imp::Literal::f64_suffixed(f))
+@@ -1064,30 +1083,61 @@
+ Literal::_new(imp::Literal::f32_unsuffixed(f))
+ }
+
++ /// Creates a new suffixed floating-point literal.
++ ///
++ /// This constructor will create a literal like `1.0f32` where the value
++ /// specified is the preceding part of the token and `f32` is the suffix of
++ /// the token. This token will always be inferred to be an `f32` in the
++ /// compiler. Literals created from negative numbers may not survive
++ /// rountrips through `TokenStream` or strings and may be broken into two
++ /// tokens (`-` and positive literal).
++ ///
++ /// # Panics
++ ///
++ /// This function requires that the specified float is finite, for example
++ /// if it is infinity or NaN this function will panic.
+ pub fn f32_suffixed(f: f32) -> Literal {
+ assert!(f.is_finite());
+ Literal::_new(imp::Literal::f32_suffixed(f))
+ }
+
++ /// String literal.
+ pub fn string(string: &str) -> Literal {
+ Literal::_new(imp::Literal::string(string))
+ }
+
++ /// Character literal.
+ pub fn character(ch: char) -> Literal {
+ Literal::_new(imp::Literal::character(ch))
+ }
+
++ /// Byte string literal.
+ pub fn byte_string(s: &[u8]) -> Literal {
+ Literal::_new(imp::Literal::byte_string(s))
+ }
+
++ /// Returns the span encompassing this literal.
+ pub fn span(&self) -> Span {
+ Span::_new(self.inner.span())
+ }
+
++ /// Configures the span associated for this literal.
+ pub fn set_span(&mut self, span: Span) {
+ self.inner.set_span(span.inner);
+ }
++
++ /// Returns a `Span` that is a subset of `self.span()` containing only
++ /// the source bytes in range `range`. Returns `None` if the would-be
++ /// trimmed span is outside the bounds of `self`.
++ ///
++ /// Warning: the underlying [`proc_macro::Literal::subspan`] method is
++ /// nightly-only. When called from within a procedural macro not using a
++ /// nightly compiler, this method will always return `None`.
++ ///
++ /// [`proc_macro::Literal::subspan`]: https://doc.rust-lang.org/proc_macro/struct.Literal.html#method.subspan
++ pub fn subspan<R: RangeBounds<usize>>(&self, range: R) -> Option<Span> {
++ self.inner.subspan(range).map(Span::_new)
++ }
+ }
+
+ impl fmt::Debug for Literal {
+@@ -1108,14 +1158,14 @@
+ use std::marker;
+ use std::rc::Rc;
+
+- use imp;
+- pub use TokenStream;
+- use TokenTree;
++ pub use crate::TokenStream;
++ use crate::{imp, TokenTree};
+
+ /// An iterator over `TokenStream`'s `TokenTree`s.
+ ///
+ /// The iteration is "shallow", e.g. the iterator doesn't recurse into
+ /// delimited groups, and returns whole groups as token trees.
++ #[derive(Clone)]
+ pub struct IntoIter {
+ inner: imp::TokenTreeIter,
+ _marker: marker::PhantomData<Rc<()>>,
+diff --git a/third_party/rust/proc-macro2/src/strnom.rs b/third_party/rust/proc-macro2/src/strnom.rs
+--- a/third_party/rust/proc-macro2/src/strnom.rs
++++ b/third_party/rust/proc-macro2/src/strnom.rs
+@@ -1,11 +1,9 @@
+ //! Adapted from [`nom`](https://github.com/Geal/nom).
+
++use crate::fallback::LexError;
+ use std::str::{Bytes, CharIndices, Chars};
+-
+ use unicode_xid::UnicodeXID;
+
+-use fallback::LexError;
+-
+ #[derive(Copy, Clone, Eq, PartialEq)]
+ pub struct Cursor<'a> {
+ pub rest: &'a str,
+@@ -95,7 +93,7 @@
+ }
+ }
+ match bytes[i] {
+- b' ' | 0x09...0x0d => {
++ b' ' | 0x09..=0x0d => {
+ i += 1;
+ continue;
+ }
+diff --git a/third_party/rust/proc-macro2/src/wrapper.rs b/third_party/rust/proc-macro2/src/wrapper.rs
+--- a/third_party/rust/proc-macro2/src/wrapper.rs
++++ b/third_party/rust/proc-macro2/src/wrapper.rs
+@@ -1,14 +1,12 @@
+ use std::fmt;
+ use std::iter;
++use std::ops::RangeBounds;
+ use std::panic::{self, PanicInfo};
+ #[cfg(super_unstable)]
+ use std::path::PathBuf;
+ use std::str::FromStr;
+
+-use fallback;
+-use proc_macro;
+-
+-use {Delimiter, Punct, Spacing, TokenTree};
++use crate::{fallback, Delimiter, Punct, Spacing, TokenTree};
+
+ #[derive(Clone)]
+ pub enum TokenStream {
+@@ -25,7 +23,7 @@
+ use std::sync::atomic::*;
+ use std::sync::Once;
+
+- static WORKS: AtomicUsize = ATOMIC_USIZE_INIT;
++ static WORKS: AtomicUsize = AtomicUsize::new(0);
+ static INIT: Once = Once::new();
+
+ match WORKS.load(Ordering::SeqCst) {
+@@ -59,7 +57,7 @@
+ // not occur, they need to call e.g. `proc_macro2::Span::call_site()` from
+ // the main thread before launching any other threads.
+ INIT.call_once(|| {
+- type PanicHook = Fn(&PanicInfo) + Sync + Send + 'static;
++ type PanicHook = dyn Fn(&PanicInfo) + Sync + Send + 'static;
+
+ let null_hook: Box<PanicHook> = Box::new(|_panic_info| { /* ignore */ });
+ let sanity_check = &*null_hook as *const PanicHook;
+@@ -199,17 +197,6 @@
+ fn from_iter<I: IntoIterator<Item = TokenStream>>(streams: I) -> Self {
+ let mut streams = streams.into_iter();
+ match streams.next() {
+- #[cfg(slow_extend)]
+- Some(TokenStream::Compiler(first)) => {
+- let stream = iter::once(first)
+- .chain(streams.map(|s| match s {
+- TokenStream::Compiler(s) => s,
+- TokenStream::Fallback(_) => mismatch(),
+- }))
+- .collect();
+- TokenStream::Compiler(stream)
+- }
+- #[cfg(not(slow_extend))]
+ Some(TokenStream::Compiler(mut first)) => {
+ first.extend(streams.map(|s| match s {
+ TokenStream::Compiler(s) => s,
+@@ -233,27 +220,11 @@
+ fn extend<I: IntoIterator<Item = TokenTree>>(&mut self, streams: I) {
+ match self {
+ TokenStream::Compiler(tts) => {
+- #[cfg(not(slow_extend))]
+- {
+- tts.extend(
+- streams
+- .into_iter()
+- .map(|t| TokenStream::from(t).unwrap_nightly()),
+- );
+- }
+- #[cfg(slow_extend)]
+- {
+- *tts =
+- tts.clone()
+- .into_iter()
+- .chain(streams.into_iter().map(TokenStream::from).flat_map(
+- |t| match t {
+- TokenStream::Compiler(tts) => tts.into_iter(),
+- _ => mismatch(),
+- },
+- ))
+- .collect();
+- }
++ tts.extend(
++ streams
++ .into_iter()
++ .map(|t| TokenStream::from(t).unwrap_nightly()),
++ );
+ }
+ TokenStream::Fallback(tts) => tts.extend(streams),
+ }
+@@ -317,6 +288,7 @@
+ }
+ }
+
++#[derive(Clone)]
+ pub enum TokenTreeIter {
+ Compiler(proc_macro::token_stream::IntoIter),
+ Fallback(fallback::TokenTreeIter),
+@@ -343,18 +315,18 @@
+ TokenTreeIter::Fallback(iter) => return iter.next(),
+ };
+ Some(match token {
+- proc_macro::TokenTree::Group(tt) => ::Group::_new(Group::Compiler(tt)).into(),
++ proc_macro::TokenTree::Group(tt) => crate::Group::_new(Group::Compiler(tt)).into(),
+ proc_macro::TokenTree::Punct(tt) => {
+ let spacing = match tt.spacing() {
+ proc_macro::Spacing::Joint => Spacing::Joint,
+ proc_macro::Spacing::Alone => Spacing::Alone,
+ };
+ let mut o = Punct::new(tt.as_char(), spacing);
+- o.set_span(::Span::_new(Span::Compiler(tt.span())));
++ o.set_span(crate::Span::_new(Span::Compiler(tt.span())));
+ o.into()
+ }
+- proc_macro::TokenTree::Ident(s) => ::Ident::_new(Ident::Compiler(s)).into(),
+- proc_macro::TokenTree::Literal(l) => ::Literal::_new(Literal::Compiler(l)).into(),
++ proc_macro::TokenTree::Ident(s) => crate::Ident::_new(Ident::Compiler(s)).into(),
++ proc_macro::TokenTree::Literal(l) => crate::Literal::_new(Literal::Compiler(l)).into(),
+ })
+ }
+
+@@ -477,12 +449,12 @@
+ #[cfg(any(super_unstable, feature = "span-locations"))]
+ pub fn start(&self) -> LineColumn {
+ match self {
+- #[cfg(nightly)]
++ #[cfg(proc_macro_span)]
+ Span::Compiler(s) => {
+ let proc_macro::LineColumn { line, column } = s.start();
+ LineColumn { line, column }
+ }
+- #[cfg(not(nightly))]
++ #[cfg(not(proc_macro_span))]
+ Span::Compiler(_) => LineColumn { line: 0, column: 0 },
+ Span::Fallback(s) => {
+ let fallback::LineColumn { line, column } = s.start();
+@@ -494,12 +466,12 @@
+ #[cfg(any(super_unstable, feature = "span-locations"))]
+ pub fn end(&self) -> LineColumn {
+ match self {
+- #[cfg(nightly)]
++ #[cfg(proc_macro_span)]
+ Span::Compiler(s) => {
+ let proc_macro::LineColumn { line, column } = s.end();
+ LineColumn { line, column }
+ }
+- #[cfg(not(nightly))]
++ #[cfg(not(proc_macro_span))]
+ Span::Compiler(_) => LineColumn { line: 0, column: 0 },
+ Span::Fallback(s) => {
+ let fallback::LineColumn { line, column } = s.end();
+@@ -508,9 +480,9 @@
+ }
+ }
+
+- #[cfg(super_unstable)]
+ pub fn join(&self, other: Span) -> Option<Span> {
+ let ret = match (self, other) {
++ #[cfg(proc_macro_span)]
+ (Span::Compiler(a), Span::Compiler(b)) => Span::Compiler(a.join(b)?),
+ (Span::Fallback(a), Span::Fallback(b)) => Span::Fallback(a.join(b)?),
+ _ => return None,
+@@ -535,9 +507,9 @@
+ }
+ }
+
+-impl From<proc_macro::Span> for ::Span {
+- fn from(proc_span: proc_macro::Span) -> ::Span {
+- ::Span::_new(Span::Compiler(proc_span))
++impl From<proc_macro::Span> for crate::Span {
++ fn from(proc_span: proc_macro::Span) -> crate::Span {
++ crate::Span::_new(Span::Compiler(proc_span))
+ }
+ }
+
+@@ -615,18 +587,22 @@
+ }
+ }
+
+- #[cfg(super_unstable)]
+ pub fn span_open(&self) -> Span {
+ match self {
++ #[cfg(proc_macro_span)]
+ Group::Compiler(g) => Span::Compiler(g.span_open()),
++ #[cfg(not(proc_macro_span))]
++ Group::Compiler(g) => Span::Compiler(g.span()),
+ Group::Fallback(g) => Span::Fallback(g.span_open()),
+ }
+ }
+
+- #[cfg(super_unstable)]
+ pub fn span_close(&self) -> Span {
+ match self {
++ #[cfg(proc_macro_span)]
+ Group::Compiler(g) => Span::Compiler(g.span_close()),
++ #[cfg(not(proc_macro_span))]
++ Group::Compiler(g) => Span::Compiler(g.span()),
+ Group::Fallback(g) => Span::Fallback(g.span_close()),
+ }
+ }
+@@ -802,42 +778,34 @@
+ u16_suffixed => u16,
+ u32_suffixed => u32,
+ u64_suffixed => u64,
++ u128_suffixed => u128,
+ usize_suffixed => usize,
+ i8_suffixed => i8,
+ i16_suffixed => i16,
+ i32_suffixed => i32,
+ i64_suffixed => i64,
++ i128_suffixed => i128,
+ isize_suffixed => isize,
+
+ f32_suffixed => f32,
+ f64_suffixed => f64,
+ }
+
+- #[cfg(u128)]
+- suffixed_numbers! {
+- i128_suffixed => i128,
+- u128_suffixed => u128,
+- }
+-
+ unsuffixed_integers! {
+ u8_unsuffixed => u8,
+ u16_unsuffixed => u16,
+ u32_unsuffixed => u32,
+ u64_unsuffixed => u64,
++ u128_unsuffixed => u128,
+ usize_unsuffixed => usize,
+ i8_unsuffixed => i8,
+ i16_unsuffixed => i16,
+ i32_unsuffixed => i32,
+ i64_unsuffixed => i64,
++ i128_unsuffixed => i128,
+ isize_unsuffixed => isize,
+ }
+
+- #[cfg(u128)]
+- unsuffixed_integers! {
+- i128_unsuffixed => i128,
+- u128_unsuffixed => u128,
+- }
+-
+ pub fn f32_unsuffixed(f: f32) -> Literal {
+ if nightly_works() {
+ Literal::Compiler(proc_macro::Literal::f32_unsuffixed(f))
+@@ -893,6 +861,16 @@
+ }
+ }
+
++ pub fn subspan<R: RangeBounds<usize>>(&self, range: R) -> Option<Span> {
++ match self {
++ #[cfg(proc_macro_span)]
++ Literal::Compiler(lit) => lit.subspan(range).map(Span::Compiler),
++ #[cfg(not(proc_macro_span))]
++ Literal::Compiler(_lit) => None,
++ Literal::Fallback(lit) => lit.subspan(range).map(Span::Fallback),
++ }
++ }
++
+ fn unwrap_nightly(self) -> proc_macro::Literal {
+ match self {
+ Literal::Compiler(s) => s,
+diff --git a/third_party/rust/proc-macro2/tests/features.rs b/third_party/rust/proc-macro2/tests/features.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/proc-macro2/tests/features.rs
+@@ -0,0 +1,8 @@
++#[test]
++#[ignore]
++fn make_sure_no_proc_macro() {
++ assert!(
++ !cfg!(feature = "proc-macro"),
++ "still compiled with proc_macro?"
++ );
++}
+diff --git a/third_party/rust/proc-macro2/tests/marker.rs b/third_party/rust/proc-macro2/tests/marker.rs
+--- a/third_party/rust/proc-macro2/tests/marker.rs
++++ b/third_party/rust/proc-macro2/tests/marker.rs
+@@ -1,5 +1,3 @@
+-extern crate proc_macro2;
+-
+ use proc_macro2::*;
+
+ macro_rules! assert_impl {
+diff --git a/third_party/rust/proc-macro2/tests/test.rs b/third_party/rust/proc-macro2/tests/test.rs
+--- a/third_party/rust/proc-macro2/tests/test.rs
++++ b/third_party/rust/proc-macro2/tests/test.rs
+@@ -1,11 +1,9 @@
+-extern crate proc_macro2;
+-
+ use std::str::{self, FromStr};
+
+ use proc_macro2::{Ident, Literal, Spacing, Span, TokenStream, TokenTree};
+
+ #[test]
+-fn terms() {
++fn idents() {
+ assert_eq!(
+ Ident::new("String", Span::call_site()).to_string(),
+ "String"
+@@ -16,7 +14,7 @@
+
+ #[test]
+ #[cfg(procmacro2_semver_exempt)]
+-fn raw_terms() {
++fn raw_idents() {
+ assert_eq!(
+ Ident::new_raw("String", Span::call_site()).to_string(),
+ "r#String"
+@@ -27,37 +25,37 @@
+
+ #[test]
+ #[should_panic(expected = "Ident is not allowed to be empty; use Option<Ident>")]
+-fn term_empty() {
++fn ident_empty() {
+ Ident::new("", Span::call_site());
+ }
+
+ #[test]
+ #[should_panic(expected = "Ident cannot be a number; use Literal instead")]
+-fn term_number() {
++fn ident_number() {
+ Ident::new("255", Span::call_site());
+ }
+
+ #[test]
+ #[should_panic(expected = "\"a#\" is not a valid Ident")]
+-fn term_invalid() {
++fn ident_invalid() {
+ Ident::new("a#", Span::call_site());
+ }
+
+ #[test]
+ #[should_panic(expected = "not a valid Ident")]
+-fn raw_term_empty() {
++fn raw_ident_empty() {
+ Ident::new("r#", Span::call_site());
+ }
+
+ #[test]
+ #[should_panic(expected = "not a valid Ident")]
+-fn raw_term_number() {
++fn raw_ident_number() {
+ Ident::new("r#255", Span::call_site());
+ }
+
+ #[test]
+ #[should_panic(expected = "\"r#a#\" is not a valid Ident")]
+-fn raw_term_invalid() {
++fn raw_ident_invalid() {
+ Ident::new("r#a#", Span::call_site());
+ }
+
+@@ -80,13 +78,41 @@
+ }
+
+ #[test]
+-fn literals() {
++fn literal_string() {
+ assert_eq!(Literal::string("foo").to_string(), "\"foo\"");
+ assert_eq!(Literal::string("\"").to_string(), "\"\\\"\"");
++ assert_eq!(Literal::string("didn't").to_string(), "\"didn't\"");
++}
++
++#[test]
++fn literal_character() {
++ assert_eq!(Literal::character('x').to_string(), "'x'");
++ assert_eq!(Literal::character('\'').to_string(), "'\\''");
++ assert_eq!(Literal::character('"').to_string(), "'\"'");
++}
++
++#[test]
++fn literal_float() {
+ assert_eq!(Literal::f32_unsuffixed(10.0).to_string(), "10.0");
+ }
+
+ #[test]
++fn literal_suffix() {
++ fn token_count(p: &str) -> usize {
++ p.parse::<TokenStream>().unwrap().into_iter().count()
++ }
++
++ assert_eq!(token_count("999u256"), 1);
++ assert_eq!(token_count("999r#u256"), 3);
++ assert_eq!(token_count("1."), 1);
++ assert_eq!(token_count("1.f32"), 3);
++ assert_eq!(token_count("1.0_0"), 1);
++ assert_eq!(token_count("1._0"), 3);
++ assert_eq!(token_count("1._m"), 3);
++ assert_eq!(token_count("\"\"s"), 1);
++}
++
++#[test]
+ fn roundtrip() {
+ fn roundtrip(p: &str) {
+ println!("parse: {}", p);
+@@ -113,6 +139,9 @@
+ 9
+ 0
+ 0xffffffffffffffffffffffffffffffff
++ 1x
++ 1u80
++ 1f320
+ ",
+ );
+ roundtrip("'a");
+@@ -129,9 +158,6 @@
+ panic!("should have failed to parse: {}\n{:#?}", p, s);
+ }
+ }
+- fail("1x");
+- fail("1u80");
+- fail("1f320");
+ fail("' static");
+ fail("r#1");
+ fail("r#_");
+@@ -339,6 +365,27 @@
+ delimiter: Bracket,
+ stream: TokenStream [
+ Ident {
++ sym: a,
++ },
++ Punct {
++ op: '+',
++ spacing: Alone,
++ },
++ Literal {
++ lit: 1,
++ },
++ ],
++ },
++]\
++ ";
++
++ #[cfg(not(procmacro2_semver_exempt))]
++ let expected_before_trailing_commas = "\
++TokenStream [
++ Group {
++ delimiter: Bracket,
++ stream: TokenStream [
++ Ident {
+ sym: a
+ },
+ Punct {
+@@ -361,6 +408,31 @@
+ stream: TokenStream [
+ Ident {
+ sym: a,
++ span: bytes(2..3),
++ },
++ Punct {
++ op: '+',
++ spacing: Alone,
++ span: bytes(4..5),
++ },
++ Literal {
++ lit: 1,
++ span: bytes(6..7),
++ },
++ ],
++ span: bytes(1..8),
++ },
++]\
++ ";
++
++ #[cfg(procmacro2_semver_exempt)]
++ let expected_before_trailing_commas = "\
++TokenStream [
++ Group {
++ delimiter: Bracket,
++ stream: TokenStream [
++ Ident {
++ sym: a,
+ span: bytes(2..3)
+ },
+ Punct {
+@@ -378,7 +450,12 @@
+ ]\
+ ";
+
+- assert_eq!(expected, format!("{:#?}", tts));
++ let actual = format!("{:#?}", tts);
++ if actual.ends_with(",\n]") {
++ assert_eq!(expected, actual);
++ } else {
++ assert_eq!(expected_before_trailing_commas, actual);
++ }
+ }
+
+ #[test]
+diff --git a/third_party/rust/quote-0.6.11/.cargo-checksum.json b/third_party/rust/quote-0.6.11/.cargo-checksum.json
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote-0.6.11/.cargo-checksum.json
+@@ -0,0 +1 @@
++{"files":{"Cargo.toml":"68f4dc89836a05a2347086addab1849567ef8073c552ec0dfca8f96fd20550f9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"d9392d4c7af3bf9714f0a95801d64de46ffd4558cdfeea0eb85b414e555abb72","src/ext.rs":"03919239a20f8393288783a21bf6fdee12e405d13d162c9faa6f8f5ce54b003b","src/lib.rs":"5345b4d2e6f923724cec35c62d7397e6f04d5503d2d813bff7bbaa7ffc39a9cf","src/to_tokens.rs":"0dcd15cba2aa83abeb47b9a1babce7a29643b5efa2fe620b070cb37bb21a84f1","tests/conditional/integer128.rs":"d83e21a91efbaa801a82ae499111bdda2d31edaa620e78c0199eba42d69c9ee6","tests/test.rs":"810013d7fd77b738abd0ace90ce2f2f3e219c757652eabab29bc1c0ce4a73b24"},"package":"cdd8e04bd9c52e0342b406469d494fcb033be4bdbe5c606016defbb1681411e1"}
+\ No newline at end of file
+diff --git a/third_party/rust/quote-0.6.11/Cargo.toml b/third_party/rust/quote-0.6.11/Cargo.toml
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote-0.6.11/Cargo.toml
+@@ -0,0 +1,33 @@
++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
++#
++# When uploading crates to the registry Cargo will automatically
++# "normalize" Cargo.toml files for maximal compatibility
++# with all versions of Cargo and also rewrite `path` dependencies
++# to registry (e.g. crates.io) dependencies
++#
++# If you believe there's an error in this file please file an
++# issue against the rust-lang/cargo repository. If you're
++# editing this file be aware that the upstream Cargo.toml
++# will likely look very different (and much more reasonable)
++
++[package]
++name = "quote"
++version = "0.6.11"
++authors = ["David Tolnay <dtolnay@gmail.com>"]
++include = ["Cargo.toml", "src/**/*.rs", "tests/**/*.rs", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
++description = "Quasi-quoting macro quote!(...)"
++documentation = "https://docs.rs/quote/"
++readme = "README.md"
++keywords = ["syn"]
++categories = ["development-tools::procedural-macro-helpers"]
++license = "MIT/Apache-2.0"
++repository = "https://github.com/dtolnay/quote"
++[dependencies.proc-macro2]
++version = "0.4.21"
++default-features = false
++
++[features]
++default = ["proc-macro"]
++proc-macro = ["proc-macro2/proc-macro"]
++[badges.travis-ci]
++repository = "dtolnay/quote"
+diff --git a/third_party/rust/quote-0.6.11/LICENSE-APACHE b/third_party/rust/quote-0.6.11/LICENSE-APACHE
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote-0.6.11/LICENSE-APACHE
+@@ -0,0 +1,201 @@
++ Apache License
++ Version 2.0, January 2004
++ http://www.apache.org/licenses/
++
++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
++
++1. Definitions.
++
++ "License" shall mean the terms and conditions for use, reproduction,
++ and distribution as defined by Sections 1 through 9 of this document.
++
++ "Licensor" shall mean the copyright owner or entity authorized by
++ the copyright owner that is granting the License.
++
++ "Legal Entity" shall mean the union of the acting entity and all
++ other entities that control, are controlled by, or are under common
++ control with that entity. For the purposes of this definition,
++ "control" means (i) the power, direct or indirect, to cause the
++ direction or management of such entity, whether by contract or
++ otherwise, or (ii) ownership of fifty percent (50%) or more of the
++ outstanding shares, or (iii) beneficial ownership of such entity.
++
++ "You" (or "Your") shall mean an individual or Legal Entity
++ exercising permissions granted by this License.
++
++ "Source" form shall mean the preferred form for making modifications,
++ including but not limited to software source code, documentation
++ source, and configuration files.
++
++ "Object" form shall mean any form resulting from mechanical
++ transformation or translation of a Source form, including but
++ not limited to compiled object code, generated documentation,
++ and conversions to other media types.
++
++ "Work" shall mean the work of authorship, whether in Source or
++ Object form, made available under the License, as indicated by a
++ copyright notice that is included in or attached to the work
++ (an example is provided in the Appendix below).
++
++ "Derivative Works" shall mean any work, whether in Source or Object
++ form, that is based on (or derived from) the Work and for which the
++ editorial revisions, annotations, elaborations, or other modifications
++ represent, as a whole, an original work of authorship. For the purposes
++ of this License, Derivative Works shall not include works that remain
++ separable from, or merely link (or bind by name) to the interfaces of,
++ the Work and Derivative Works thereof.
++
++ "Contribution" shall mean any work of authorship, including
++ the original version of the Work and any modifications or additions
++ to that Work or Derivative Works thereof, that is intentionally
++ submitted to Licensor for inclusion in the Work by the copyright owner
++ or by an individual or Legal Entity authorized to submit on behalf of
++ the copyright owner. For the purposes of this definition, "submitted"
++ means any form of electronic, verbal, or written communication sent
++ to the Licensor or its representatives, including but not limited to
++ communication on electronic mailing lists, source code control systems,
++ and issue tracking systems that are managed by, or on behalf of, the
++ Licensor for the purpose of discussing and improving the Work, but
++ excluding communication that is conspicuously marked or otherwise
++ designated in writing by the copyright owner as "Not a Contribution."
++
++ "Contributor" shall mean Licensor and any individual or Legal Entity
++ on behalf of whom a Contribution has been received by Licensor and
++ subsequently incorporated within the Work.
++
++2. Grant of Copyright License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ copyright license to reproduce, prepare Derivative Works of,
++ publicly display, publicly perform, sublicense, and distribute the
++ Work and such Derivative Works in Source or Object form.
++
++3. Grant of Patent License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ (except as stated in this section) patent license to make, have made,
++ use, offer to sell, sell, import, and otherwise transfer the Work,
++ where such license applies only to those patent claims licensable
++ by such Contributor that are necessarily infringed by their
++ Contribution(s) alone or by combination of their Contribution(s)
++ with the Work to which such Contribution(s) was submitted. If You
++ institute patent litigation against any entity (including a
++ cross-claim or counterclaim in a lawsuit) alleging that the Work
++ or a Contribution incorporated within the Work constitutes direct
++ or contributory patent infringement, then any patent licenses
++ granted to You under this License for that Work shall terminate
++ as of the date such litigation is filed.
++
++4. Redistribution. You may reproduce and distribute copies of the
++ Work or Derivative Works thereof in any medium, with or without
++ modifications, and in Source or Object form, provided that You
++ meet the following conditions:
++
++ (a) You must give any other recipients of the Work or
++ Derivative Works a copy of this License; and
++
++ (b) You must cause any modified files to carry prominent notices
++ stating that You changed the files; and
++
++ (c) You must retain, in the Source form of any Derivative Works
++ that You distribute, all copyright, patent, trademark, and
++ attribution notices from the Source form of the Work,
++ excluding those notices that do not pertain to any part of
++ the Derivative Works; and
++
++ (d) If the Work includes a "NOTICE" text file as part of its
++ distribution, then any Derivative Works that You distribute must
++ include a readable copy of the attribution notices contained
++ within such NOTICE file, excluding those notices that do not
++ pertain to any part of the Derivative Works, in at least one
++ of the following places: within a NOTICE text file distributed
++ as part of the Derivative Works; within the Source form or
++ documentation, if provided along with the Derivative Works; or,
++ within a display generated by the Derivative Works, if and
++ wherever such third-party notices normally appear. The contents
++ of the NOTICE file are for informational purposes only and
++ do not modify the License. You may add Your own attribution
++ notices within Derivative Works that You distribute, alongside
++ or as an addendum to the NOTICE text from the Work, provided
++ that such additional attribution notices cannot be construed
++ as modifying the License.
++
++ You may add Your own copyright statement to Your modifications and
++ may provide additional or different license terms and conditions
++ for use, reproduction, or distribution of Your modifications, or
++ for any such Derivative Works as a whole, provided Your use,
++ reproduction, and distribution of the Work otherwise complies with
++ the conditions stated in this License.
++
++5. Submission of Contributions. Unless You explicitly state otherwise,
++ any Contribution intentionally submitted for inclusion in the Work
++ by You to the Licensor shall be under the terms and conditions of
++ this License, without any additional terms or conditions.
++ Notwithstanding the above, nothing herein shall supersede or modify
++ the terms of any separate license agreement you may have executed
++ with Licensor regarding such Contributions.
++
++6. Trademarks. This License does not grant permission to use the trade
++ names, trademarks, service marks, or product names of the Licensor,
++ except as required for reasonable and customary use in describing the
++ origin of the Work and reproducing the content of the NOTICE file.
++
++7. Disclaimer of Warranty. Unless required by applicable law or
++ agreed to in writing, Licensor provides the Work (and each
++ Contributor provides its Contributions) on an "AS IS" BASIS,
++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
++ implied, including, without limitation, any warranties or conditions
++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
++ PARTICULAR PURPOSE. You are solely responsible for determining the
++ appropriateness of using or redistributing the Work and assume any
++ risks associated with Your exercise of permissions under this License.
++
++8. Limitation of Liability. In no event and under no legal theory,
++ whether in tort (including negligence), contract, or otherwise,
++ unless required by applicable law (such as deliberate and grossly
++ negligent acts) or agreed to in writing, shall any Contributor be
++ liable to You for damages, including any direct, indirect, special,
++ incidental, or consequential damages of any character arising as a
++ result of this License or out of the use or inability to use the
++ Work (including but not limited to damages for loss of goodwill,
++ work stoppage, computer failure or malfunction, or any and all
++ other commercial damages or losses), even if such Contributor
++ has been advised of the possibility of such damages.
++
++9. Accepting Warranty or Additional Liability. While redistributing
++ the Work or Derivative Works thereof, You may choose to offer,
++ and charge a fee for, acceptance of support, warranty, indemnity,
++ or other liability obligations and/or rights consistent with this
++ License. However, in accepting such obligations, You may act only
++ on Your own behalf and on Your sole responsibility, not on behalf
++ of any other Contributor, and only if You agree to indemnify,
++ defend, and hold each Contributor harmless for any liability
++ incurred by, or claims asserted against, such Contributor by reason
++ of your accepting any such warranty or additional liability.
++
++END OF TERMS AND CONDITIONS
++
++APPENDIX: How to apply the Apache License to your work.
++
++ To apply the Apache License to your work, attach the following
++ boilerplate notice, with the fields enclosed by brackets "[]"
++ replaced with your own identifying information. (Don't include
++ the brackets!) The text should be enclosed in the appropriate
++ comment syntax for the file format. We also recommend that a
++ file or class name and description of purpose be included on the
++ same "printed page" as the copyright notice for easier
++ identification within third-party archives.
++
++Copyright [yyyy] [name of copyright owner]
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
+diff --git a/third_party/rust/quote-0.6.11/LICENSE-MIT b/third_party/rust/quote-0.6.11/LICENSE-MIT
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote-0.6.11/LICENSE-MIT
+@@ -0,0 +1,25 @@
++Copyright (c) 2016 The Rust Project Developers
++
++Permission is hereby granted, free of charge, to any
++person obtaining a copy of this software and associated
++documentation files (the "Software"), to deal in the
++Software without restriction, including without
++limitation the rights to use, copy, modify, merge,
++publish, distribute, sublicense, and/or sell copies of
++the Software, and to permit persons to whom the Software
++is furnished to do so, subject to the following
++conditions:
++
++The above copyright notice and this permission notice
++shall be included in all copies or substantial portions
++of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
+diff --git a/third_party/rust/quote-0.6.11/README.md b/third_party/rust/quote-0.6.11/README.md
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote-0.6.11/README.md
+@@ -0,0 +1,241 @@
++Rust Quasi-Quoting
++==================
++
++[![Build Status](https://api.travis-ci.org/dtolnay/quote.svg?branch=master)](https://travis-ci.org/dtolnay/quote)
++[![Latest Version](https://img.shields.io/crates/v/quote.svg)](https://crates.io/crates/quote)
++[![Rust Documentation](https://img.shields.io/badge/api-rustdoc-blue.svg)](https://docs.rs/quote/)
++
++This crate provides the [`quote!`] macro for turning Rust syntax tree data
++structures into tokens of source code.
++
++[`quote!`]: https://docs.rs/quote/0.6/quote/macro.quote.html
++
++Procedural macros in Rust receive a stream of tokens as input, execute arbitrary
++Rust code to determine how to manipulate those tokens, and produce a stream of
++tokens to hand back to the compiler to compile into the caller's crate.
++Quasi-quoting is a solution to one piece of that -- producing tokens to return
++to the compiler.
++
++The idea of quasi-quoting is that we write *code* that we treat as *data*.
++Within the `quote!` macro, we can write what looks like code to our text editor
++or IDE. We get all the benefits of the editor's brace matching, syntax
++highlighting, indentation, and maybe autocompletion. But rather than compiling
++that as code into the current crate, we can treat it as data, pass it around,
++mutate it, and eventually hand it back to the compiler as tokens to compile into
++the macro caller's crate.
++
++This crate is motivated by the procedural macro use case, but is a
++general-purpose Rust quasi-quoting library and is not specific to procedural
++macros.
++
++*Version requirement: Quote supports any compiler version back to Rust's very
++first support for procedural macros in Rust 1.15.0.*
++
++[*Release notes*](https://github.com/dtolnay/quote/releases)
++
++```toml
++[dependencies]
++quote = "0.6"
++```
++
++## Syntax
++
++The quote crate provides a [`quote!`] macro within which you can write Rust code
++that gets packaged into a [`TokenStream`] and can be treated as data. You should
++think of `TokenStream` as representing a fragment of Rust source code.
++
++[`TokenStream`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.TokenStream.html
++
++Within the `quote!` macro, interpolation is done with `#var`. Any type
++implementing the [`quote::ToTokens`] trait can be interpolated. This includes
++most Rust primitive types as well as most of the syntax tree types from [`syn`].
++
++[`quote::ToTokens`]: https://docs.rs/quote/0.6/quote/trait.ToTokens.html
++[`syn`]: https://github.com/dtolnay/syn
++
++```rust
++let tokens = quote! {
++ struct SerializeWith #generics #where_clause {
++ value: &'a #field_ty,
++ phantom: core::marker::PhantomData<#item_ty>,
++ }
++
++ impl #generics serde::Serialize for SerializeWith #generics #where_clause {
++ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
++ where
++ S: serde::Serializer,
++ {
++ #path(self.value, serializer)
++ }
++ }
++
++ SerializeWith {
++ value: #value,
++ phantom: core::marker::PhantomData::<#item_ty>,
++ }
++};
++```
++
++## Repetition
++
++Repetition is done using `#(...)*` or `#(...),*` similar to `macro_rules!`. This
++iterates through the elements of any variable interpolated within the repetition
++and inserts a copy of the repetition body for each one. The variables in an
++interpolation may be anything that implements `IntoIterator`, including `Vec` or
++a pre-existing iterator.
++
++- `#(#var)*` — no separators
++- `#(#var),*` — the character before the asterisk is used as a separator
++- `#( struct #var; )*` — the repetition can contain other things
++- `#( #k => println!("{}", #v), )*` — even multiple interpolations
++
++Note that there is a difference between `#(#var ,)*` and `#(#var),*`—the latter
++does not produce a trailing comma. This matches the behavior of delimiters in
++`macro_rules!`.
++
++## Returning tokens to the compiler
++
++The `quote!` macro evaluates to an expression of type
++`proc_macro2::TokenStream`. Meanwhile Rust procedural macros are expected to
++return the type `proc_macro::TokenStream`.
++
++The difference between the two types is that `proc_macro` types are entirely
++specific to procedural macros and cannot ever exist in code outside of a
++procedural macro, while `proc_macro2` types may exist anywhere including tests
++and non-macro code like main.rs and build.rs. This is why even the procedural
++macro ecosystem is largely built around `proc_macro2`, because that ensures the
++libraries are unit testable and accessible in non-macro contexts.
++
++There is a [`From`]-conversion in both directions so returning the output of
++`quote!` from a procedural macro usually looks like `tokens.into()` or
++`proc_macro::TokenStream::from(tokens)`.
++
++[`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
++
++## Examples
++
++### Combining quoted fragments
++
++Usually you don't end up constructing an entire final `TokenStream` in one
++piece. Different parts may come from different helper functions. The tokens
++produced by `quote!` themselves implement `ToTokens` and so can be interpolated
++into later `quote!` invocations to build up a final result.
++
++```rust
++let type_definition = quote! {...};
++let methods = quote! {...};
++
++let tokens = quote! {
++ #type_definition
++ #methods
++};
++```
++
++### Constructing identifiers
++
++Suppose we have an identifier `ident` which came from somewhere in a macro
++input and we need to modify it in some way for the macro output. Let's consider
++prepending the identifier with an underscore.
++
++Simply interpolating the identifier next to an underscore will not have the
++behavior of concatenating them. The underscore and the identifier will continue
++to be two separate tokens as if you had written `_ x`.
++
++```rust
++// incorrect
++quote! {
++ let mut _#ident = 0;
++}
++```
++
++The solution is to perform token-level manipulations using the APIs provided by
++Syn and proc-macro2.
++
++```rust
++let concatenated = format!("_{}", ident);
++let varname = syn::Ident::new(&concatenated, ident.span());
++quote! {
++ let mut #varname = 0;
++}
++```
++
++### Making method calls
++
++Let's say our macro requires some type specified in the macro input to have a
++constructor called `new`. We have the type in a variable called `field_type` of
++type `syn::Type` and want to invoke the constructor.
++
++```rust
++// incorrect
++quote! {
++ let value = #field_type::new();
++}
++```
++
++This works only sometimes. If `field_type` is `String`, the expanded code
++contains `String::new()` which is fine. But if `field_type` is something like
++`Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid syntax.
++Ordinarily in handwritten Rust we would write `Vec::<i32>::new()` but for macros
++often the following is more convenient.
++
++```rust
++quote! {
++ let value = <#field_type>::new();
++}
++```
++
++This expands to `<Vec<i32>>::new()` which behaves correctly.
++
++A similar pattern is appropriate for trait methods.
++
++```rust
++quote! {
++ let value = <#field_type as core::default::Default>::default();
++}
++```
++
++## Hygiene
++
++Any interpolated tokens preserve the `Span` information provided by their
++`ToTokens` implementation. Tokens that originate within a `quote!` invocation
++are spanned with [`Span::call_site()`].
++
++[`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site
++
++A different span can be provided explicitly through the [`quote_spanned!`]
++macro.
++
++[`quote_spanned!`]: https://docs.rs/quote/0.6/quote/macro.quote_spanned.html
++
++### Limitations
++
++- A non-repeating variable may not be interpolated inside of a repeating block
++ ([#7]).
++- The same variable may not be interpolated more than once inside of a repeating
++ block ([#8]).
++
++[#7]: https://github.com/dtolnay/quote/issues/7
++[#8]: https://github.com/dtolnay/quote/issues/8
++
++### Recursion limit
++
++The `quote!` macro relies on deep recursion so some large invocations may fail
++with "recursion limit reached" when you compile. If it fails, bump up the
++recursion limit by adding `#![recursion_limit = "128"]` to your crate. An even
++higher limit may be necessary for especially large invocations. You don't need
++this unless the compiler tells you that you need it.
++
++## License
++
++Licensed under either of
++
++ * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
++ * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
++
++at your option.
++
++### Contribution
++
++Unless you explicitly state otherwise, any contribution intentionally submitted
++for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
++be dual licensed as above, without any additional terms or conditions.
+diff --git a/third_party/rust/quote-0.6.11/src/ext.rs b/third_party/rust/quote-0.6.11/src/ext.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote-0.6.11/src/ext.rs
+@@ -0,0 +1,112 @@
++use super::ToTokens;
++
++use std::iter;
++
++use proc_macro2::{TokenStream, TokenTree};
++
++/// TokenStream extension trait with methods for appending tokens.
++///
++/// This trait is sealed and cannot be implemented outside of the `quote` crate.
++pub trait TokenStreamExt: private::Sealed {
++ /// For use by `ToTokens` implementations.
++ ///
++ /// Appends the token specified to this list of tokens.
++ fn append<U>(&mut self, token: U)
++ where
++ U: Into<TokenTree>;
++
++ /// For use by `ToTokens` implementations.
++ ///
++ /// ```edition2018
++ /// # use quote::{quote, TokenStreamExt, ToTokens};
++ /// # use proc_macro2::TokenStream;
++ /// #
++ /// struct X;
++ ///
++ /// impl ToTokens for X {
++ /// fn to_tokens(&self, tokens: &mut TokenStream) {
++ /// tokens.append_all(&[true, false]);
++ /// }
++ /// }
++ ///
++ /// let tokens = quote!(#X);
++ /// assert_eq!(tokens.to_string(), "true false");
++ /// ```
++ fn append_all<T, I>(&mut self, iter: I)
++ where
++ T: ToTokens,
++ I: IntoIterator<Item = T>;
++
++ /// For use by `ToTokens` implementations.
++ ///
++ /// Appends all of the items in the iterator `I`, separated by the tokens
++ /// `U`.
++ fn append_separated<T, I, U>(&mut self, iter: I, op: U)
++ where
++ T: ToTokens,
++ I: IntoIterator<Item = T>,
++ U: ToTokens;
++
++ /// For use by `ToTokens` implementations.
++ ///
++ /// Appends all tokens in the iterator `I`, appending `U` after each
++ /// element, including after the last element of the iterator.
++ fn append_terminated<T, I, U>(&mut self, iter: I, term: U)
++ where
++ T: ToTokens,
++ I: IntoIterator<Item = T>,
++ U: ToTokens;
++}
++
++impl TokenStreamExt for TokenStream {
++ fn append<U>(&mut self, token: U)
++ where
++ U: Into<TokenTree>,
++ {
++ self.extend(iter::once(token.into()));
++ }
++
++ fn append_all<T, I>(&mut self, iter: I)
++ where
++ T: ToTokens,
++ I: IntoIterator<Item = T>,
++ {
++ for token in iter {
++ token.to_tokens(self);
++ }
++ }
++
++ fn append_separated<T, I, U>(&mut self, iter: I, op: U)
++ where
++ T: ToTokens,
++ I: IntoIterator<Item = T>,
++ U: ToTokens,
++ {
++ for (i, token) in iter.into_iter().enumerate() {
++ if i > 0 {
++ op.to_tokens(self);
++ }
++ token.to_tokens(self);
++ }
++ }
++
++ fn append_terminated<T, I, U>(&mut self, iter: I, term: U)
++ where
++ T: ToTokens,
++ I: IntoIterator<Item = T>,
++ U: ToTokens,
++ {
++ for token in iter {
++ token.to_tokens(self);
++ term.to_tokens(self);
++ }
++ }
++}
++
++mod private {
++ use proc_macro2::TokenStream;
++
++ pub trait Sealed {}
++
++ impl Sealed for TokenStream {}
++}
+diff --git a/third_party/rust/quote-0.6.11/src/lib.rs b/third_party/rust/quote-0.6.11/src/lib.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote-0.6.11/src/lib.rs
+@@ -0,0 +1,969 @@
++//! This crate provides the [`quote!`] macro for turning Rust syntax tree data
++//! structures into tokens of source code.
++//!
++//! [`quote!`]: macro.quote.html
++//!
++//! Procedural macros in Rust receive a stream of tokens as input, execute
++//! arbitrary Rust code to determine how to manipulate those tokens, and produce
++//! a stream of tokens to hand back to the compiler to compile into the caller's
++//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens
++//! to return to the compiler.
++//!
++//! The idea of quasi-quoting is that we write *code* that we treat as *data*.
++//! Within the `quote!` macro, we can write what looks like code to our text
++//! editor or IDE. We get all the benefits of the editor's brace matching,
++//! syntax highlighting, indentation, and maybe autocompletion. But rather than
++//! compiling that as code into the current crate, we can treat it as data, pass
++//! it around, mutate it, and eventually hand it back to the compiler as tokens
++//! to compile into the macro caller's crate.
++//!
++//! This crate is motivated by the procedural macro use case, but is a
++//! general-purpose Rust quasi-quoting library and is not specific to procedural
++//! macros.
++//!
++//! *Version requirement: Quote supports any compiler version back to Rust's
++//! very first support for procedural macros in Rust 1.15.0.*
++//!
++//! ```toml
++//! [dependencies]
++//! quote = "0.6"
++//! ```
++//!
++//! # Example
++//!
++//! The following quasi-quoted block of code is something you might find in [a]
++//! procedural macro having to do with data structure serialization. The `#var`
++//! syntax performs interpolation of runtime variables into the quoted tokens.
++//! Check out the documentation of the [`quote!`] macro for more detail about
++//! the syntax. See also the [`quote_spanned!`] macro which is important for
++//! implementing hygienic procedural macros.
++//!
++//! [a]: https://serde.rs/
++//! [`quote_spanned!`]: macro.quote_spanned.html
++//!
++//! ```edition2018
++//! # use quote::quote;
++//! #
++//! # let generics = "";
++//! # let where_clause = "";
++//! # let field_ty = "";
++//! # let item_ty = "";
++//! # let path = "";
++//! # let value = "";
++//! #
++//! let tokens = quote! {
++//! struct SerializeWith #generics #where_clause {
++//! value: &'a #field_ty,
++//! phantom: core::marker::PhantomData<#item_ty>,
++//! }
++//!
++//! impl #generics serde::Serialize for SerializeWith #generics #where_clause {
++//! fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
++//! where
++//! S: serde::Serializer,
++//! {
++//! #path(self.value, serializer)
++//! }
++//! }
++//!
++//! SerializeWith {
++//! value: #value,
++//! phantom: core::marker::PhantomData::<#item_ty>,
++//! }
++//! };
++//! ```
++//!
++//! # Recursion limit
++//!
++//! The `quote!` macro relies on deep recursion so some large invocations may
++//! fail with "recursion limit reached" when you compile. If it fails, bump up
++//! the recursion limit by adding `#![recursion_limit = "128"]` to your crate.
++//! An even higher limit may be necessary for especially large invocations.
++
++// Quote types in rustdoc of other crates get linked to here.
++#![doc(html_root_url = "https://docs.rs/quote/0.6.11")]
++
++#[cfg(all(
++ not(all(target_arch = "wasm32", target_os = "unknown")),
++ feature = "proc-macro"
++))]
++extern crate proc_macro;
++extern crate proc_macro2;
++
++mod ext;
++pub use ext::TokenStreamExt;
++
++mod to_tokens;
++pub use to_tokens::ToTokens;
++
++// Not public API.
++#[doc(hidden)]
++pub mod __rt {
++ use ext::TokenStreamExt;
++ pub use proc_macro2::*;
++
++ fn is_ident_start(c: u8) -> bool {
++ (b'a' <= c && c <= b'z') || (b'A' <= c && c <= b'Z') || c == b'_'
++ }
++
++ fn is_ident_continue(c: u8) -> bool {
++ (b'a' <= c && c <= b'z')
++ || (b'A' <= c && c <= b'Z')
++ || c == b'_'
++ || (b'0' <= c && c <= b'9')
++ }
++
++ fn is_ident(token: &str) -> bool {
++ if token.bytes().all(|digit| digit >= b'0' && digit <= b'9') {
++ return false;
++ }
++
++ let mut bytes = token.bytes();
++ let first = bytes.next().unwrap();
++ if !is_ident_start(first) {
++ return false;
++ }
++ for ch in bytes {
++ if !is_ident_continue(ch) {
++ return false;
++ }
++ }
++ true
++ }
++
++ pub fn parse(tokens: &mut TokenStream, span: Span, s: &str) {
++ if is_ident(s) {
++ // Fast path, since idents are the most common token.
++ tokens.append(Ident::new(s, span));
++ } else {
++ let s: TokenStream = s.parse().expect("invalid token stream");
++ tokens.extend(s.into_iter().map(|mut t| {
++ t.set_span(span);
++ t
++ }));
++ }
++ }
++
++ macro_rules! push_punct {
++ ($name:ident $char1:tt) => {
++ pub fn $name(tokens: &mut TokenStream, span: Span) {
++ let mut punct = Punct::new($char1, Spacing::Alone);
++ punct.set_span(span);
++ tokens.append(punct);
++ }
++ };
++ ($name:ident $char1:tt $char2:tt) => {
++ pub fn $name(tokens: &mut TokenStream, span: Span) {
++ let mut punct = Punct::new($char1, Spacing::Joint);
++ punct.set_span(span);
++ tokens.append(punct);
++ let mut punct = Punct::new($char2, Spacing::Alone);
++ punct.set_span(span);
++ tokens.append(punct);
++ }
++ };
++ ($name:ident $char1:tt $char2:tt $char3:tt) => {
++ pub fn $name(tokens: &mut TokenStream, span: Span) {
++ let mut punct = Punct::new($char1, Spacing::Joint);
++ punct.set_span(span);
++ tokens.append(punct);
++ let mut punct = Punct::new($char2, Spacing::Joint);
++ punct.set_span(span);
++ tokens.append(punct);
++ let mut punct = Punct::new($char3, Spacing::Alone);
++ punct.set_span(span);
++ tokens.append(punct);
++ }
++ };
++ }
++
++ push_punct!(push_add '+');
++ push_punct!(push_add_eq '+' '=');
++ push_punct!(push_and '&');
++ push_punct!(push_and_and '&' '&');
++ push_punct!(push_and_eq '&' '=');
++ push_punct!(push_at '@');
++ push_punct!(push_bang '!');
++ push_punct!(push_caret '^');
++ push_punct!(push_caret_eq '^' '=');
++ push_punct!(push_colon ':');
++ push_punct!(push_colon2 ':' ':');
++ push_punct!(push_comma ',');
++ push_punct!(push_div '/');
++ push_punct!(push_div_eq '/' '=');
++ push_punct!(push_dot '.');
++ push_punct!(push_dot2 '.' '.');
++ push_punct!(push_dot3 '.' '.' '.');
++ push_punct!(push_dot_dot_eq '.' '.' '=');
++ push_punct!(push_eq '=');
++ push_punct!(push_eq_eq '=' '=');
++ push_punct!(push_ge '>' '=');
++ push_punct!(push_gt '>');
++ push_punct!(push_le '<' '=');
++ push_punct!(push_lt '<');
++ push_punct!(push_mul_eq '*' '=');
++ push_punct!(push_ne '!' '=');
++ push_punct!(push_or '|');
++ push_punct!(push_or_eq '|' '=');
++ push_punct!(push_or_or '|' '|');
++ push_punct!(push_pound '#');
++ push_punct!(push_question '?');
++ push_punct!(push_rarrow '-' '>');
++ push_punct!(push_larrow '<' '-');
++ push_punct!(push_rem '%');
++ push_punct!(push_rem_eq '%' '=');
++ push_punct!(push_fat_arrow '=' '>');
++ push_punct!(push_semi ';');
++ push_punct!(push_shl '<' '<');
++ push_punct!(push_shl_eq '<' '<' '=');
++ push_punct!(push_shr '>' '>');
++ push_punct!(push_shr_eq '>' '>' '=');
++ push_punct!(push_star '*');
++ push_punct!(push_sub '-');
++ push_punct!(push_sub_eq '-' '=');
++}
++
++/// The whole point.
++///
++/// Performs variable interpolation against the input and produces it as
++/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use
++/// `into()` to build a `TokenStream`.
++///
++/// [`TokenStream`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.TokenStream.html
++///
++/// # Interpolation
++///
++/// Variable interpolation is done with `#var` (similar to `$var` in
++/// `macro_rules!` macros). This grabs the `var` variable that is currently in
++/// scope and inserts it in that location in the output tokens. Any type
++/// implementing the [`ToTokens`] trait can be interpolated. This includes most
++/// Rust primitive types as well as most of the syntax tree types from the [Syn]
++/// crate.
++///
++/// [`ToTokens`]: trait.ToTokens.html
++/// [Syn]: https://github.com/dtolnay/syn
++///
++/// Repetition is done using `#(...)*` or `#(...),*` again similar to
++/// `macro_rules!`. This iterates through the elements of any variable
++/// interpolated within the repetition and inserts a copy of the repetition body
++/// for each one. The variables in an interpolation may be anything that
++/// implements `IntoIterator`, including `Vec` or a pre-existing iterator.
++///
++/// - `#(#var)*` — no separators
++/// - `#(#var),*` — the character before the asterisk is used as a separator
++/// - `#( struct #var; )*` — the repetition can contain other tokens
++/// - `#( #k => println!("{}", #v), )*` — even multiple interpolations
++///
++/// # Hygiene
++///
++/// Any interpolated tokens preserve the `Span` information provided by their
++/// `ToTokens` implementation. Tokens that originate within the `quote!`
++/// invocation are spanned with [`Span::call_site()`].
++///
++/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site
++///
++/// A different span can be provided through the [`quote_spanned!`] macro.
++///
++/// [`quote_spanned!`]: macro.quote_spanned.html
++///
++/// # Return type
++///
++/// The macro evaluates to an expression of type `proc_macro2::TokenStream`.
++/// Meanwhile Rust procedural macros are expected to return the type
++/// `proc_macro::TokenStream`.
++///
++/// The difference between the two types is that `proc_macro` types are entirely
++/// specific to procedural macros and cannot ever exist in code outside of a
++/// procedural macro, while `proc_macro2` types may exist anywhere including
++/// tests and non-macro code like main.rs and build.rs. This is why even the
++/// procedural macro ecosystem is largely built around `proc_macro2`, because
++/// that ensures the libraries are unit testable and accessible in non-macro
++/// contexts.
++///
++/// There is a [`From`]-conversion in both directions so returning the output of
++/// `quote!` from a procedural macro usually looks like `tokens.into()` or
++/// `proc_macro::TokenStream::from(tokens)`.
++///
++/// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
++///
++/// # Examples
++///
++/// ## Procedural macro
++///
++/// The structure of a basic procedural macro is as follows. Refer to the [Syn]
++/// crate for further useful guidance on using `quote!` as part of a procedural
++/// macro.
++///
++/// [Syn]: https://github.com/dtolnay/syn
++///
++/// ```edition2018
++/// # #[cfg(any())]
++/// extern crate proc_macro;
++/// # use proc_macro2 as proc_macro;
++///
++/// use proc_macro::TokenStream;
++/// use quote::quote;
++///
++/// # const IGNORE_TOKENS: &'static str = stringify! {
++/// #[proc_macro_derive(HeapSize)]
++/// # };
++/// pub fn derive_heap_size(input: TokenStream) -> TokenStream {
++/// // Parse the input and figure out what implementation to generate...
++/// # const IGNORE_TOKENS: &'static str = stringify! {
++/// let name = /* ... */;
++/// let expr = /* ... */;
++/// # };
++/// #
++/// # let name = 0;
++/// # let expr = 0;
++///
++/// let expanded = quote! {
++/// // The generated impl.
++/// impl heapsize::HeapSize for #name {
++/// fn heap_size_of_children(&self) -> usize {
++/// #expr
++/// }
++/// }
++/// };
++///
++/// // Hand the output tokens back to the compiler.
++/// TokenStream::from(expanded)
++/// }
++/// ```
++///
++/// ## Combining quoted fragments
++///
++/// Usually you don't end up constructing an entire final `TokenStream` in one
++/// piece. Different parts may come from different helper functions. The tokens
++/// produced by `quote!` themselves implement `ToTokens` and so can be
++/// interpolated into later `quote!` invocations to build up a final result.
++///
++/// ```edition2018
++/// # use quote::quote;
++/// #
++/// let type_definition = quote! {...};
++/// let methods = quote! {...};
++///
++/// let tokens = quote! {
++/// #type_definition
++/// #methods
++/// };
++/// ```
++///
++/// ## Constructing identifiers
++///
++/// Suppose we have an identifier `ident` which came from somewhere in a macro
++/// input and we need to modify it in some way for the macro output. Let's
++/// consider prepending the identifier with an underscore.
++///
++/// Simply interpolating the identifier next to an underscore will not have the
++/// behavior of concatenating them. The underscore and the identifier will
++/// continue to be two separate tokens as if you had written `_ x`.
++///
++/// ```edition2018
++/// # use proc_macro2::{self as syn, Span};
++/// # use quote::quote;
++/// #
++/// # let ident = syn::Ident::new("i", Span::call_site());
++/// #
++/// // incorrect
++/// quote! {
++/// let mut _#ident = 0;
++/// }
++/// # ;
++/// ```
++///
++/// The solution is to perform token-level manipulations using the APIs provided
++/// by Syn and proc-macro2.
++///
++/// ```edition2018
++/// # use proc_macro2::{self as syn, Span};
++/// # use quote::quote;
++/// #
++/// # let ident = syn::Ident::new("i", Span::call_site());
++/// #
++/// let concatenated = format!("_{}", ident);
++/// let varname = syn::Ident::new(&concatenated, ident.span());
++/// quote! {
++/// let mut #varname = 0;
++/// }
++/// # ;
++/// ```
++///
++/// ## Making method calls
++///
++/// Let's say our macro requires some type specified in the macro input to have
++/// a constructor called `new`. We have the type in a variable called
++/// `field_type` of type `syn::Type` and want to invoke the constructor.
++///
++/// ```edition2018
++/// # use quote::quote;
++/// #
++/// # let field_type = quote!(...);
++/// #
++/// // incorrect
++/// quote! {
++/// let value = #field_type::new();
++/// }
++/// # ;
++/// ```
++///
++/// This works only sometimes. If `field_type` is `String`, the expanded code
++/// contains `String::new()` which is fine. But if `field_type` is something
++/// like `Vec<i32>` then the expanded code is `Vec<i32>::new()` which is invalid
++/// syntax. Ordinarily in handwritten Rust we would write `Vec::<i32>::new()`
++/// but for macros often the following is more convenient.
++///
++/// ```edition2018
++/// # use quote::quote;
++/// #
++/// # let field_type = quote!(...);
++/// #
++/// quote! {
++/// let value = <#field_type>::new();
++/// }
++/// # ;
++/// ```
++///
++/// This expands to `<Vec<i32>>::new()` which behaves correctly.
++///
++/// A similar pattern is appropriate for trait methods.
++///
++/// ```edition2018
++/// # use quote::quote;
++/// #
++/// # let field_type = quote!(...);
++/// #
++/// quote! {
++/// let value = <#field_type as core::default::Default>::default();
++/// }
++/// # ;
++/// ```
++#[macro_export(local_inner_macros)]
++macro_rules! quote {
++ ($($tt:tt)*) => (quote_spanned!($crate::__rt::Span::call_site()=> $($tt)*));
++}
++
++/// Same as `quote!`, but applies a given span to all tokens originating within
++/// the macro invocation.
++///
++/// # Syntax
++///
++/// A span expression of type [`Span`], followed by `=>`, followed by the tokens
++/// to quote. The span expression should be brief -- use a variable for anything
++/// more than a few characters. There should be no space before the `=>` token.
++///
++/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html
++///
++/// ```edition2018
++/// # use proc_macro2::Span;
++/// # use quote::quote_spanned;
++/// #
++/// # const IGNORE_TOKENS: &'static str = stringify! {
++/// let span = /* ... */;
++/// # };
++/// # let span = Span::call_site();
++/// # let init = 0;
++///
++/// // On one line, use parentheses.
++/// let tokens = quote_spanned!(span=> Box::into_raw(Box::new(#init)));
++///
++/// // On multiple lines, place the span at the top and use braces.
++/// let tokens = quote_spanned! {span=>
++/// Box::into_raw(Box::new(#init))
++/// };
++/// ```
++///
++/// The lack of space before the `=>` should look jarring to Rust programmers
++/// and this is intentional. The formatting is designed to be visibly
++/// off-balance and draw the eye a particular way, due to the span expression
++/// being evaluated in the context of the procedural macro and the remaining
++/// tokens being evaluated in the generated code.
++///
++/// # Hygiene
++///
++/// Any interpolated tokens preserve the `Span` information provided by their
++/// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
++/// invocation are spanned with the given span argument.
++///
++/// # Example
++///
++/// The following procedural macro code uses `quote_spanned!` to assert that a
++/// particular Rust type implements the [`Sync`] trait so that references can be
++/// safely shared between threads.
++///
++/// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
++///
++/// ```edition2018
++/// # use quote::{quote_spanned, TokenStreamExt, ToTokens};
++/// # use proc_macro2::{Span, TokenStream};
++/// #
++/// # struct Type;
++/// #
++/// # impl Type {
++/// # fn span(&self) -> Span {
++/// # Span::call_site()
++/// # }
++/// # }
++/// #
++/// # impl ToTokens for Type {
++/// # fn to_tokens(&self, _tokens: &mut TokenStream) {}
++/// # }
++/// #
++/// # let ty = Type;
++/// # let call_site = Span::call_site();
++/// #
++/// let ty_span = ty.span();
++/// let assert_sync = quote_spanned! {ty_span=>
++/// struct _AssertSync where #ty: Sync;
++/// };
++/// ```
++///
++/// If the assertion fails, the user will see an error like the following. The
++/// input span of their type is hightlighted in the error.
++///
++/// ```text
++/// error[E0277]: the trait bound `*const (): std::marker::Sync` is not satisfied
++/// --> src/main.rs:10:21
++/// |
++/// 10 | static ref PTR: *const () = &();
++/// | ^^^^^^^^^ `*const ()` cannot be shared between threads safely
++/// ```
++///
++/// In this example it is important for the where-clause to be spanned with the
++/// line/column information of the user's input type so that error messages are
++/// placed appropriately by the compiler. But it is also incredibly important
++/// that `Sync` resolves at the macro definition site and not the macro call
++/// site. If we resolve `Sync` at the same span that the user's type is going to
++/// be resolved, then they could bypass our check by defining their own trait
++/// named `Sync` that is implemented for their type.
++#[macro_export(local_inner_macros)]
++macro_rules! quote_spanned {
++ ($span:expr=> $($tt:tt)*) => {
++ {
++ let mut _s = $crate::__rt::TokenStream::new();
++ let _span = $span;
++ quote_each_token!(_s _span $($tt)*);
++ _s
++ }
++ };
++}
++
++// Extract the names of all #metavariables and pass them to the $finish macro.
++//
++// in: pounded_var_names!(then () a #b c #( #d )* #e)
++// out: then!(() b d e)
++#[macro_export(local_inner_macros)]
++#[doc(hidden)]
++macro_rules! pounded_var_names {
++ ($finish:ident ($($found:ident)*) # ( $($inner:tt)* ) $($rest:tt)*) => {
++ pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
++ };
++
++ ($finish:ident ($($found:ident)*) # [ $($inner:tt)* ] $($rest:tt)*) => {
++ pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
++ };
++
++ ($finish:ident ($($found:ident)*) # { $($inner:tt)* } $($rest:tt)*) => {
++ pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
++ };
++
++ ($finish:ident ($($found:ident)*) # $first:ident $($rest:tt)*) => {
++ pounded_var_names!($finish ($($found)* $first) $($rest)*)
++ };
++
++ ($finish:ident ($($found:ident)*) ( $($inner:tt)* ) $($rest:tt)*) => {
++ pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
++ };
++
++ ($finish:ident ($($found:ident)*) [ $($inner:tt)* ] $($rest:tt)*) => {
++ pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
++ };
++
++ ($finish:ident ($($found:ident)*) { $($inner:tt)* } $($rest:tt)*) => {
++ pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
++ };
++
++ ($finish:ident ($($found:ident)*) $ignore:tt $($rest:tt)*) => {
++ pounded_var_names!($finish ($($found)*) $($rest)*)
++ };
++
++ ($finish:ident ($($found:ident)*)) => {
++ $finish!(() $($found)*)
++ };
++}
++
++// in: nested_tuples_pat!(() a b c d e)
++// out: ((((a b) c) d) e)
++//
++// in: nested_tuples_pat!(() a)
++// out: a
++#[macro_export(local_inner_macros)]
++#[doc(hidden)]
++macro_rules! nested_tuples_pat {
++ (()) => {
++ &()
++ };
++
++ (() $first:ident $($rest:ident)*) => {
++ nested_tuples_pat!(($first) $($rest)*)
++ };
++
++ (($pat:pat) $first:ident $($rest:ident)*) => {
++ nested_tuples_pat!((($pat, $first)) $($rest)*)
++ };
++
++ (($done:pat)) => {
++ $done
++ };
++}
++
++// in: multi_zip_expr!(() a b c d e)
++// out: a.into_iter().zip(b).zip(c).zip(d).zip(e)
++//
++// in: multi_zip_iter!(() a)
++// out: a
++#[macro_export(local_inner_macros)]
++#[doc(hidden)]
++macro_rules! multi_zip_expr {
++ (()) => {
++ &[]
++ };
++
++ (() $single:ident) => {
++ $single
++ };
++
++ (() $first:ident $($rest:ident)*) => {
++ multi_zip_expr!(($first.into_iter()) $($rest)*)
++ };
++
++ (($zips:expr) $first:ident $($rest:ident)*) => {
++ multi_zip_expr!(($zips.zip($first)) $($rest)*)
++ };
++
++ (($done:expr)) => {
++ $done
++ };
++}
++
++#[macro_export(local_inner_macros)]
++#[doc(hidden)]
++macro_rules! quote_each_token {
++ ($tokens:ident $span:ident) => {};
++
++ ($tokens:ident $span:ident # ! $($rest:tt)*) => {
++ quote_each_token!($tokens $span #);
++ quote_each_token!($tokens $span !);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident # ( $($inner:tt)* ) * $($rest:tt)*) => {
++ for pounded_var_names!(nested_tuples_pat () $($inner)*)
++ in pounded_var_names!(multi_zip_expr () $($inner)*) {
++ quote_each_token!($tokens $span $($inner)*);
++ }
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt * $($rest:tt)*) => {
++ for (_i, pounded_var_names!(nested_tuples_pat () $($inner)*))
++ in pounded_var_names!(multi_zip_expr () $($inner)*).into_iter().enumerate() {
++ if _i > 0 {
++ quote_each_token!($tokens $span $sep);
++ }
++ quote_each_token!($tokens $span $($inner)*);
++ }
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident # [ $($inner:tt)* ] $($rest:tt)*) => {
++ quote_each_token!($tokens $span #);
++ $tokens.extend({
++ let mut g = $crate::__rt::Group::new(
++ $crate::__rt::Delimiter::Bracket,
++ quote_spanned!($span=> $($inner)*),
++ );
++ g.set_span($span);
++ Some($crate::__rt::TokenTree::from(g))
++ });
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident # $first:ident $($rest:tt)*) => {
++ $crate::ToTokens::to_tokens(&$first, &mut $tokens);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident ( $($first:tt)* ) $($rest:tt)*) => {
++ $tokens.extend({
++ let mut g = $crate::__rt::Group::new(
++ $crate::__rt::Delimiter::Parenthesis,
++ quote_spanned!($span=> $($first)*),
++ );
++ g.set_span($span);
++ Some($crate::__rt::TokenTree::from(g))
++ });
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident [ $($first:tt)* ] $($rest:tt)*) => {
++ $tokens.extend({
++ let mut g = $crate::__rt::Group::new(
++ $crate::__rt::Delimiter::Bracket,
++ quote_spanned!($span=> $($first)*),
++ );
++ g.set_span($span);
++ Some($crate::__rt::TokenTree::from(g))
++ });
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident { $($first:tt)* } $($rest:tt)*) => {
++ $tokens.extend({
++ let mut g = $crate::__rt::Group::new(
++ $crate::__rt::Delimiter::Brace,
++ quote_spanned!($span=> $($first)*),
++ );
++ g.set_span($span);
++ Some($crate::__rt::TokenTree::from(g))
++ });
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident + $($rest:tt)*) => {
++ $crate::__rt::push_add(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident += $($rest:tt)*) => {
++ $crate::__rt::push_add_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident & $($rest:tt)*) => {
++ $crate::__rt::push_and(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident && $($rest:tt)*) => {
++ $crate::__rt::push_and_and(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident &= $($rest:tt)*) => {
++ $crate::__rt::push_and_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident @ $($rest:tt)*) => {
++ $crate::__rt::push_at(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident ! $($rest:tt)*) => {
++ $crate::__rt::push_bang(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident ^ $($rest:tt)*) => {
++ $crate::__rt::push_caret(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident ^= $($rest:tt)*) => {
++ $crate::__rt::push_caret_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident : $($rest:tt)*) => {
++ $crate::__rt::push_colon(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident :: $($rest:tt)*) => {
++ $crate::__rt::push_colon2(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident , $($rest:tt)*) => {
++ $crate::__rt::push_comma(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident / $($rest:tt)*) => {
++ $crate::__rt::push_div(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident /= $($rest:tt)*) => {
++ $crate::__rt::push_div_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident . $($rest:tt)*) => {
++ $crate::__rt::push_dot(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident .. $($rest:tt)*) => {
++ $crate::__rt::push_dot2(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident ... $($rest:tt)*) => {
++ $crate::__rt::push_dot3(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident ..= $($rest:tt)*) => {
++ $crate::__rt::push_dot_dot_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident = $($rest:tt)*) => {
++ $crate::__rt::push_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident == $($rest:tt)*) => {
++ $crate::__rt::push_eq_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident >= $($rest:tt)*) => {
++ $crate::__rt::push_ge(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident > $($rest:tt)*) => {
++ $crate::__rt::push_gt(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident <= $($rest:tt)*) => {
++ $crate::__rt::push_le(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident < $($rest:tt)*) => {
++ $crate::__rt::push_lt(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident *= $($rest:tt)*) => {
++ $crate::__rt::push_mul_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident != $($rest:tt)*) => {
++ $crate::__rt::push_ne(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident | $($rest:tt)*) => {
++ $crate::__rt::push_or(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident |= $($rest:tt)*) => {
++ $crate::__rt::push_or_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident || $($rest:tt)*) => {
++ $crate::__rt::push_or_or(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident # $($rest:tt)*) => {
++ $crate::__rt::push_pound(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident ? $($rest:tt)*) => {
++ $crate::__rt::push_question(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident -> $($rest:tt)*) => {
++ $crate::__rt::push_rarrow(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident <- $($rest:tt)*) => {
++ $crate::__rt::push_larrow(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident % $($rest:tt)*) => {
++ $crate::__rt::push_rem(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident %= $($rest:tt)*) => {
++ $crate::__rt::push_rem_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident => $($rest:tt)*) => {
++ $crate::__rt::push_fat_arrow(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident ; $($rest:tt)*) => {
++ $crate::__rt::push_semi(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident << $($rest:tt)*) => {
++ $crate::__rt::push_shl(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident <<= $($rest:tt)*) => {
++ $crate::__rt::push_shl_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident >> $($rest:tt)*) => {
++ $crate::__rt::push_shr(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident >>= $($rest:tt)*) => {
++ $crate::__rt::push_shr_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident * $($rest:tt)*) => {
++ $crate::__rt::push_star(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident - $($rest:tt)*) => {
++ $crate::__rt::push_sub(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident -= $($rest:tt)*) => {
++ $crate::__rt::push_sub_eq(&mut $tokens, $span);
++ quote_each_token!($tokens $span $($rest)*);
++ };
++
++ ($tokens:ident $span:ident $first:tt $($rest:tt)*) => {
++ $crate::__rt::parse(&mut $tokens, $span, quote_stringify!($first));
++ quote_each_token!($tokens $span $($rest)*);
++ };
++}
++
++// Unhygienically invoke whatever `stringify` the caller has in scope i.e. not a
++// local macro. The macros marked `local_inner_macros` above cannot invoke
++// `stringify` directly.
++#[macro_export]
++#[doc(hidden)]
++macro_rules! quote_stringify {
++ ($tt:tt) => {
++ stringify!($tt)
++ };
++}
+diff --git a/third_party/rust/quote-0.6.11/src/to_tokens.rs b/third_party/rust/quote-0.6.11/src/to_tokens.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote-0.6.11/src/to_tokens.rs
+@@ -0,0 +1,198 @@
++use super::TokenStreamExt;
++
++use std::borrow::Cow;
++use std::iter;
++
++use proc_macro2::{Group, Ident, Literal, Punct, Span, TokenStream, TokenTree};
++
++/// Types that can be interpolated inside a [`quote!`] invocation.
++///
++/// [`quote!`]: macro.quote.html
++pub trait ToTokens {
++ /// Write `self` to the given `TokenStream`.
++ ///
++ /// The token append methods provided by the [`TokenStreamExt`] extension
++ /// trait may be useful for implementing `ToTokens`.
++ ///
++ /// [`TokenStreamExt`]: trait.TokenStreamExt.html
++ ///
++ /// # Example
++ ///
++ /// Example implementation for a struct representing Rust paths like
++ /// `std::cmp::PartialEq`:
++ ///
++ /// ```edition2018
++ /// use proc_macro2::{TokenTree, Spacing, Span, Punct, TokenStream};
++ /// use quote::{TokenStreamExt, ToTokens};
++ ///
++ /// pub struct Path {
++ /// pub global: bool,
++ /// pub segments: Vec<PathSegment>,
++ /// }
++ ///
++ /// impl ToTokens for Path {
++ /// fn to_tokens(&self, tokens: &mut TokenStream) {
++ /// for (i, segment) in self.segments.iter().enumerate() {
++ /// if i > 0 || self.global {
++ /// // Double colon `::`
++ /// tokens.append(Punct::new(':', Spacing::Joint));
++ /// tokens.append(Punct::new(':', Spacing::Alone));
++ /// }
++ /// segment.to_tokens(tokens);
++ /// }
++ /// }
++ /// }
++ /// #
++ /// # pub struct PathSegment;
++ /// #
++ /// # impl ToTokens for PathSegment {
++ /// # fn to_tokens(&self, tokens: &mut TokenStream) {
++ /// # unimplemented!()
++ /// # }
++ /// # }
++ /// ```
++ fn to_tokens(&self, tokens: &mut TokenStream);
++
++ /// Convert `self` directly into a `TokenStream` object.
++ ///
++ /// This method is implicitly implemented using `to_tokens`, and acts as a
++ /// convenience method for consumers of the `ToTokens` trait.
++ fn into_token_stream(self) -> TokenStream
++ where
++ Self: Sized,
++ {
++ let mut tokens = TokenStream::new();
++ self.to_tokens(&mut tokens);
++ tokens
++ }
++}
++
++impl<'a, T: ?Sized + ToTokens> ToTokens for &'a T {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ (**self).to_tokens(tokens);
++ }
++}
++
++impl<'a, T: ?Sized + ToTokens> ToTokens for &'a mut T {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ (**self).to_tokens(tokens);
++ }
++}
++
++impl<'a, T: ?Sized + ToOwned + ToTokens> ToTokens for Cow<'a, T> {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ (**self).to_tokens(tokens);
++ }
++}
++
++impl<T: ?Sized + ToTokens> ToTokens for Box<T> {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ (**self).to_tokens(tokens);
++ }
++}
++
++impl<T: ToTokens> ToTokens for Option<T> {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ if let Some(ref t) = *self {
++ t.to_tokens(tokens);
++ }
++ }
++}
++
++impl ToTokens for str {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ tokens.append(Literal::string(self));
++ }
++}
++
++impl ToTokens for String {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ self.as_str().to_tokens(tokens);
++ }
++}
++
++macro_rules! primitive {
++ ($($t:ident => $name:ident)*) => ($(
++ impl ToTokens for $t {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ tokens.append(Literal::$name(*self));
++ }
++ }
++ )*)
++}
++
++primitive! {
++ i8 => i8_suffixed
++ i16 => i16_suffixed
++ i32 => i32_suffixed
++ i64 => i64_suffixed
++ isize => isize_suffixed
++
++ u8 => u8_suffixed
++ u16 => u16_suffixed
++ u32 => u32_suffixed
++ u64 => u64_suffixed
++ usize => usize_suffixed
++
++ f32 => f32_suffixed
++ f64 => f64_suffixed
++}
++
++#[cfg(integer128)]
++primitive! {
++ i128 => i128_suffixed
++ u128 => u128_suffixed
++}
++
++impl ToTokens for char {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ tokens.append(Literal::character(*self));
++ }
++}
++
++impl ToTokens for bool {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ let word = if *self { "true" } else { "false" };
++ tokens.append(Ident::new(word, Span::call_site()));
++ }
++}
++
++impl ToTokens for Group {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ tokens.append(self.clone());
++ }
++}
++
++impl ToTokens for Ident {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ tokens.append(self.clone());
++ }
++}
++
++impl ToTokens for Punct {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ tokens.append(self.clone());
++ }
++}
++
++impl ToTokens for Literal {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ tokens.append(self.clone());
++ }
++}
++
++impl ToTokens for TokenTree {
++ fn to_tokens(&self, dst: &mut TokenStream) {
++ dst.append(self.clone());
++ }
++}
++
++impl ToTokens for TokenStream {
++ fn to_tokens(&self, dst: &mut TokenStream) {
++ dst.extend(iter::once(self.clone()));
++ }
++
++ fn into_token_stream(self) -> TokenStream {
++ self
++ }
++}
+diff --git a/third_party/rust/quote/tests/conditional/integer128.rs b/third_party/rust/quote-0.6.11/tests/conditional/integer128.rs
+rename from third_party/rust/quote/tests/conditional/integer128.rs
+rename to third_party/rust/quote-0.6.11/tests/conditional/integer128.rs
+diff --git a/third_party/rust/quote-0.6.11/tests/test.rs b/third_party/rust/quote-0.6.11/tests/test.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote-0.6.11/tests/test.rs
+@@ -0,0 +1,295 @@
++#![cfg_attr(feature = "cargo-clippy", allow(blacklisted_name))]
++
++use std::borrow::Cow;
++
++extern crate proc_macro2;
++#[macro_use]
++extern crate quote;
++
++use proc_macro2::{Ident, Span, TokenStream};
++use quote::TokenStreamExt;
++
++mod conditional {
++ #[cfg(integer128)]
++ mod integer128;
++}
++
++struct X;
++
++impl quote::ToTokens for X {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ tokens.append(Ident::new("X", Span::call_site()));
++ }
++}
++
++#[test]
++fn test_quote_impl() {
++ let tokens = quote! {
++ impl<'a, T: ToTokens> ToTokens for &'a T {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ (**self).to_tokens(tokens)
++ }
++ }
++ };
++
++ let expected = concat!(
++ "impl < 'a , T : ToTokens > ToTokens for & 'a T { ",
++ "fn to_tokens ( & self , tokens : & mut TokenStream ) { ",
++ "( * * self ) . to_tokens ( tokens ) ",
++ "} ",
++ "}"
++ );
++
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_substitution() {
++ let x = X;
++ let tokens = quote!(#x <#x> (#x) [#x] {#x});
++
++ let expected = "X < X > ( X ) [ X ] { X }";
++
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_iter() {
++ let primes = &[X, X, X, X];
++
++ assert_eq!("X X X X", quote!(#(#primes)*).to_string());
++
++ assert_eq!("X , X , X , X ,", quote!(#(#primes,)*).to_string());
++
++ assert_eq!("X , X , X , X", quote!(#(#primes),*).to_string());
++}
++
++#[test]
++fn test_advanced() {
++ let generics = quote!( <'a, T> );
++
++ let where_clause = quote!( where T: Serialize );
++
++ let field_ty = quote!(String);
++
++ let item_ty = quote!(Cow<'a, str>);
++
++ let path = quote!(SomeTrait::serialize_with);
++
++ let value = quote!(self.x);
++
++ let tokens = quote! {
++ struct SerializeWith #generics #where_clause {
++ value: &'a #field_ty,
++ phantom: ::std::marker::PhantomData<#item_ty>,
++ }
++
++ impl #generics ::serde::Serialize for SerializeWith #generics #where_clause {
++ fn serialize<S>(&self, s: &mut S) -> Result<(), S::Error>
++ where S: ::serde::Serializer
++ {
++ #path(self.value, s)
++ }
++ }
++
++ SerializeWith {
++ value: #value,
++ phantom: ::std::marker::PhantomData::<#item_ty>,
++ }
++ };
++
++ let expected = concat!(
++ "struct SerializeWith < 'a , T > where T : Serialize { ",
++ "value : & 'a String , ",
++ "phantom : :: std :: marker :: PhantomData < Cow < 'a , str > > , ",
++ "} ",
++ "impl < 'a , T > :: serde :: Serialize for SerializeWith < 'a , T > where T : Serialize { ",
++ "fn serialize < S > ( & self , s : & mut S ) -> Result < ( ) , S :: Error > ",
++ "where S : :: serde :: Serializer ",
++ "{ ",
++ "SomeTrait :: serialize_with ( self . value , s ) ",
++ "} ",
++ "} ",
++ "SerializeWith { ",
++ "value : self . x , ",
++ "phantom : :: std :: marker :: PhantomData :: < Cow < 'a , str > > , ",
++ "}"
++ );
++
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_integer() {
++ let ii8 = -1i8;
++ let ii16 = -1i16;
++ let ii32 = -1i32;
++ let ii64 = -1i64;
++ let iisize = -1isize;
++ let uu8 = 1u8;
++ let uu16 = 1u16;
++ let uu32 = 1u32;
++ let uu64 = 1u64;
++ let uusize = 1usize;
++
++ let tokens = quote! {
++ #ii8 #ii16 #ii32 #ii64 #iisize
++ #uu8 #uu16 #uu32 #uu64 #uusize
++ };
++ let expected = "-1i8 -1i16 -1i32 -1i64 -1isize 1u8 1u16 1u32 1u64 1usize";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_floating() {
++ let e32 = 2.345f32;
++
++ let e64 = 2.345f64;
++
++ let tokens = quote! {
++ #e32
++ #e64
++ };
++ let expected = concat!("2.345f32 2.345f64");
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_char() {
++ let zero = '\0';
++ let pound = '#';
++ let quote = '"';
++ let apost = '\'';
++ let newline = '\n';
++ let heart = '\u{2764}';
++
++ let tokens = quote! {
++ #zero #pound #quote #apost #newline #heart
++ };
++ let expected = "'\\u{0}' '#' '\\\"' '\\'' '\\n' '\\u{2764}'";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_str() {
++ let s = "\0 a 'b \" c";
++ let tokens = quote!(#s);
++ let expected = "\"\\u{0} a \\'b \\\" c\"";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_string() {
++ let s = "\0 a 'b \" c".to_string();
++ let tokens = quote!(#s);
++ let expected = "\"\\u{0} a \\'b \\\" c\"";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_ident() {
++ let foo = Ident::new("Foo", Span::call_site());
++ let bar = Ident::new(&format!("Bar{}", 7), Span::call_site());
++ let tokens = quote!(struct #foo; enum #bar {});
++ let expected = "struct Foo ; enum Bar7 { }";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_duplicate() {
++ let ch = 'x';
++
++ let tokens = quote!(#ch #ch);
++
++ let expected = "'x' 'x'";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_fancy_repetition() {
++ let foo = vec!["a", "b"];
++ let bar = vec![true, false];
++
++ let tokens = quote! {
++ #(#foo: #bar),*
++ };
++
++ let expected = r#""a" : true , "b" : false"#;
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_nested_fancy_repetition() {
++ let nested = vec![vec!['a', 'b', 'c'], vec!['x', 'y', 'z']];
++
++ let tokens = quote! {
++ #(
++ #(#nested)*
++ ),*
++ };
++
++ let expected = "'a' 'b' 'c' , 'x' 'y' 'z'";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_empty_repetition() {
++ let tokens = quote!(#(a b)* #(c d),*);
++ assert_eq!("", tokens.to_string());
++}
++
++#[test]
++fn test_variable_name_conflict() {
++ // The implementation of `#(...),*` uses the variable `_i` but it should be
++ // fine, if a little confusing when debugging.
++ let _i = vec!['a', 'b'];
++ let tokens = quote! { #(#_i),* };
++ let expected = "'a' , 'b'";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_empty_quote() {
++ let tokens = quote!();
++ assert_eq!("", tokens.to_string());
++}
++
++#[test]
++fn test_box_str() {
++ let b = "str".to_owned().into_boxed_str();
++ let tokens = quote! { #b };
++ assert_eq!("\"str\"", tokens.to_string());
++}
++
++#[test]
++fn test_cow() {
++ let owned: Cow<Ident> = Cow::Owned(Ident::new("owned", Span::call_site()));
++
++ let ident = Ident::new("borrowed", Span::call_site());
++ let borrowed = Cow::Borrowed(&ident);
++
++ let tokens = quote! { #owned #borrowed };
++ assert_eq!("owned borrowed", tokens.to_string());
++}
++
++#[test]
++fn test_closure() {
++ fn field_i(i: usize) -> Ident {
++ Ident::new(&format!("__field{}", i), Span::call_site())
++ }
++
++ let fields = (0usize..3)
++ .map(field_i as fn(_) -> _)
++ .map(|var| quote! { #var });
++
++ let tokens = quote! { #(#fields)* };
++ assert_eq!("__field0 __field1 __field2", tokens.to_string());
++}
++
++#[test]
++fn test_append_tokens() {
++ let mut a = quote!(a);
++ let b = quote!(b);
++ a.append_all(b);
++ assert_eq!("a b", a.to_string());
++}
+diff --git a/third_party/rust/quote/.cargo-checksum.json b/third_party/rust/quote/.cargo-checksum.json
+--- a/third_party/rust/quote/.cargo-checksum.json
++++ b/third_party/rust/quote/.cargo-checksum.json
+@@ -1 +1 @@
+-{"files":{"Cargo.toml":"68f4dc89836a05a2347086addab1849567ef8073c552ec0dfca8f96fd20550f9","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"d9392d4c7af3bf9714f0a95801d64de46ffd4558cdfeea0eb85b414e555abb72","src/ext.rs":"03919239a20f8393288783a21bf6fdee12e405d13d162c9faa6f8f5ce54b003b","src/lib.rs":"5345b4d2e6f923724cec35c62d7397e6f04d5503d2d813bff7bbaa7ffc39a9cf","src/to_tokens.rs":"0dcd15cba2aa83abeb47b9a1babce7a29643b5efa2fe620b070cb37bb21a84f1","tests/conditional/integer128.rs":"d83e21a91efbaa801a82ae499111bdda2d31edaa620e78c0199eba42d69c9ee6","tests/test.rs":"810013d7fd77b738abd0ace90ce2f2f3e219c757652eabab29bc1c0ce4a73b24"},"package":"cdd8e04bd9c52e0342b406469d494fcb033be4bdbe5c606016defbb1681411e1"}
+\ No newline at end of file
++{"files":{"Cargo.toml":"b5c36a5bffa3623f84002fa884157ae303d2dae68d2f8a6d73ba87e82d7c56d7","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"c9a75f18b9ab2927829a208fc6aa2cf4e63b8420887ba29cdb265d6619ae82d5","README.md":"ea5abae24fdf6d9be51c80427bd12b95d146c4660e872599910cf062d6fbab9a","src/ext.rs":"a9fed3a1a4c9d3f2de717ba808af99291b995db2cbf8067f4b6927c39cc62bc6","src/format.rs":"46bf0859e6da5ec195a409ba8bbd2029d32a30d169c30c4c8aee7020f478a8a2","src/ident_fragment.rs":"0824dca06942d8e097d220db0ace0fe3ae7cf08f0a86e9828d012c131b6590c2","src/lib.rs":"bce63d6d9822373dab6f9a1f3df419b5753625e618474c304f05ab3b38845760","src/runtime.rs":"13263adfb56e2c597c69277b3500ab35ca8a08f60ba6a66f921ffa5cdc09bde2","src/spanned.rs":"adc0ed742ad17327c375879472d435cea168c208c303f53eb93cb2c0f10f3650","src/to_tokens.rs":"e589c1643479a9003d4dd1d9fa63714042b106f1b16d8ea3903cfe2f73a020f5","tests/compiletest.rs":"0a52a44786aea1c299c695bf948b2ed2081e4cc344e5c2cadceab4eb03d0010d","tests/test.rs":"92062fb9ba4a3b74345fede8e09e1d376107f98dcd79931a794433fa2d74aeb5","tests/ui/does-not-have-iter-interpolated-dup.rs":"ad13eea21d4cdd2ab6c082f633392e1ff20fb0d1af5f2177041e0bf7f30da695","tests/ui/does-not-have-iter-interpolated.rs":"83a5b3f240651adcbe4b6e51076d76d653ad439b37442cf4054f1fd3c073f3b7","tests/ui/does-not-have-iter-separated.rs":"fe413c48331d5e3a7ae5fef6a5892a90c72f610d54595879eb49d0a94154ba3f","tests/ui/does-not-have-iter.rs":"09dc9499d861b63cebb0848b855b78e2dc9497bfde37ba6339f3625ae009a62f","tests/ui/not-quotable.rs":"5759d0884943417609f28faadc70254a3e2fd3d9bd6ff7297a3fb70a77fafd8a","tests/ui/not-repeatable.rs":"b08405e02d46712d47e48ec8d0d68c93d8ebf3bb299714a373c2c954de79f6bd","tests/ui/wrong-type-span.rs":"5f310cb7fde3ef51bad01e7f286d244e3b6e67396cd2ea7eab77275c9d902699"},"package":"053a8c8bcc71fcce321828dc897a98ab9760bef03a4fc36693c231e5b3216cfe"}
+\ No newline at end of file
+diff --git a/third_party/rust/quote/Cargo.toml b/third_party/rust/quote/Cargo.toml
+--- a/third_party/rust/quote/Cargo.toml
++++ b/third_party/rust/quote/Cargo.toml
+@@ -3,7 +3,7 @@
+ # When uploading crates to the registry Cargo will automatically
+ # "normalize" Cargo.toml files for maximal compatibility
+ # with all versions of Cargo and also rewrite `path` dependencies
+-# to registry (e.g. crates.io) dependencies
++# to registry (e.g., crates.io) dependencies
+ #
+ # If you believe there's an error in this file please file an
+ # issue against the rust-lang/cargo repository. If you're
+@@ -11,8 +11,9 @@
+ # will likely look very different (and much more reasonable)
+
+ [package]
++edition = "2018"
+ name = "quote"
+-version = "0.6.11"
++version = "1.0.2"
+ authors = ["David Tolnay <dtolnay@gmail.com>"]
+ include = ["Cargo.toml", "src/**/*.rs", "tests/**/*.rs", "README.md", "LICENSE-APACHE", "LICENSE-MIT"]
+ description = "Quasi-quoting macro quote!(...)"
+@@ -20,11 +21,19 @@
+ readme = "README.md"
+ keywords = ["syn"]
+ categories = ["development-tools::procedural-macro-helpers"]
+-license = "MIT/Apache-2.0"
++license = "MIT OR Apache-2.0"
+ repository = "https://github.com/dtolnay/quote"
++
++[lib]
++name = "quote"
+ [dependencies.proc-macro2]
+-version = "0.4.21"
++version = "1.0"
+ default-features = false
++[dev-dependencies.rustversion]
++version = "0.1"
++
++[dev-dependencies.trybuild]
++version = "1.0"
+
+ [features]
+ default = ["proc-macro"]
+diff --git a/third_party/rust/quote/README.md b/third_party/rust/quote/README.md
+--- a/third_party/rust/quote/README.md
++++ b/third_party/rust/quote/README.md
+@@ -8,13 +8,13 @@
+ This crate provides the [`quote!`] macro for turning Rust syntax tree data
+ structures into tokens of source code.
+
+-[`quote!`]: https://docs.rs/quote/0.6/quote/macro.quote.html
++[`quote!`]: https://docs.rs/quote/1.0/quote/macro.quote.html
+
+ Procedural macros in Rust receive a stream of tokens as input, execute arbitrary
+ Rust code to determine how to manipulate those tokens, and produce a stream of
+ tokens to hand back to the compiler to compile into the caller's crate.
+-Quasi-quoting is a solution to one piece of that -- producing tokens to return
+-to the compiler.
++Quasi-quoting is a solution to one piece of that &mdash; producing tokens to
++return to the compiler.
+
+ The idea of quasi-quoting is that we write *code* that we treat as *data*.
+ Within the `quote!` macro, we can write what looks like code to our text editor
+@@ -35,7 +35,7 @@
+
+ ```toml
+ [dependencies]
+-quote = "0.6"
++quote = "1.0"
+ ```
+
+ ## Syntax
+@@ -44,13 +44,13 @@
+ that gets packaged into a [`TokenStream`] and can be treated as data. You should
+ think of `TokenStream` as representing a fragment of Rust source code.
+
+-[`TokenStream`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.TokenStream.html
++[`TokenStream`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.TokenStream.html
+
+ Within the `quote!` macro, interpolation is done with `#var`. Any type
+ implementing the [`quote::ToTokens`] trait can be interpolated. This includes
+ most Rust primitive types as well as most of the syntax tree types from [`syn`].
+
+-[`quote::ToTokens`]: https://docs.rs/quote/0.6/quote/trait.ToTokens.html
++[`quote::ToTokens`]: https://docs.rs/quote/1.0/quote/trait.ToTokens.html
+ [`syn`]: https://github.com/dtolnay/syn
+
+ ```rust
+@@ -148,8 +148,20 @@
+ }
+ ```
+
+-The solution is to perform token-level manipulations using the APIs provided by
+-Syn and proc-macro2.
++The solution is to build a new identifier token with the correct value. As this
++is such a common case, the `format_ident!` macro provides a convenient utility
++for doing so correctly.
++
++```rust
++let varname = format_ident!("_{}", ident);
++quote! {
++ let mut #varname = 0;
++}
++```
++
++Alternatively, the APIs provided by Syn and proc-macro2 can be used to directly
++build the identifier. This is roughly equivalent to the above, but will not
++handle `ident` being a raw identifier.
+
+ ```rust
+ let concatenated = format!("_{}", ident);
+@@ -200,42 +212,26 @@
+ `ToTokens` implementation. Tokens that originate within a `quote!` invocation
+ are spanned with [`Span::call_site()`].
+
+-[`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site
++[`Span::call_site()`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html#method.call_site
+
+ A different span can be provided explicitly through the [`quote_spanned!`]
+ macro.
+
+-[`quote_spanned!`]: https://docs.rs/quote/0.6/quote/macro.quote_spanned.html
+-
+-### Limitations
++[`quote_spanned!`]: https://docs.rs/quote/1.0/quote/macro.quote_spanned.html
+
+-- A non-repeating variable may not be interpolated inside of a repeating block
+- ([#7]).
+-- The same variable may not be interpolated more than once inside of a repeating
+- block ([#8]).
++<br>
+
+-[#7]: https://github.com/dtolnay/quote/issues/7
+-[#8]: https://github.com/dtolnay/quote/issues/8
+-
+-### Recursion limit
++#### License
+
+-The `quote!` macro relies on deep recursion so some large invocations may fail
+-with "recursion limit reached" when you compile. If it fails, bump up the
+-recursion limit by adding `#![recursion_limit = "128"]` to your crate. An even
+-higher limit may be necessary for especially large invocations. You don't need
+-this unless the compiler tells you that you need it.
+-
+-## License
++<sup>
++Licensed under either of <a href="LICENSE-APACHE">Apache License, Version
++2.0</a> or <a href="LICENSE-MIT">MIT license</a> at your option.
++</sup>
+
+-Licensed under either of
+-
+- * Apache License, Version 2.0 ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+- * MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT)
++<br>
+
+-at your option.
+-
+-### Contribution
+-
++<sub>
+ Unless you explicitly state otherwise, any contribution intentionally submitted
+ for inclusion in this crate by you, as defined in the Apache-2.0 license, shall
+ be dual licensed as above, without any additional terms or conditions.
++</sub>
+diff --git a/third_party/rust/quote/src/ext.rs b/third_party/rust/quote/src/ext.rs
+--- a/third_party/rust/quote/src/ext.rs
++++ b/third_party/rust/quote/src/ext.rs
+@@ -17,7 +17,7 @@
+
+ /// For use by `ToTokens` implementations.
+ ///
+- /// ```edition2018
++ /// ```
+ /// # use quote::{quote, TokenStreamExt, ToTokens};
+ /// # use proc_macro2::TokenStream;
+ /// #
+@@ -32,29 +32,29 @@
+ /// let tokens = quote!(#X);
+ /// assert_eq!(tokens.to_string(), "true false");
+ /// ```
+- fn append_all<T, I>(&mut self, iter: I)
++ fn append_all<I>(&mut self, iter: I)
+ where
+- T: ToTokens,
+- I: IntoIterator<Item = T>;
++ I: IntoIterator,
++ I::Item: ToTokens;
+
+ /// For use by `ToTokens` implementations.
+ ///
+ /// Appends all of the items in the iterator `I`, separated by the tokens
+ /// `U`.
+- fn append_separated<T, I, U>(&mut self, iter: I, op: U)
++ fn append_separated<I, U>(&mut self, iter: I, op: U)
+ where
+- T: ToTokens,
+- I: IntoIterator<Item = T>,
++ I: IntoIterator,
++ I::Item: ToTokens,
+ U: ToTokens;
+
+ /// For use by `ToTokens` implementations.
+ ///
+ /// Appends all tokens in the iterator `I`, appending `U` after each
+ /// element, including after the last element of the iterator.
+- fn append_terminated<T, I, U>(&mut self, iter: I, term: U)
++ fn append_terminated<I, U>(&mut self, iter: I, term: U)
+ where
+- T: ToTokens,
+- I: IntoIterator<Item = T>,
++ I: IntoIterator,
++ I::Item: ToTokens,
+ U: ToTokens;
+ }
+
+@@ -66,20 +66,20 @@
+ self.extend(iter::once(token.into()));
+ }
+
+- fn append_all<T, I>(&mut self, iter: I)
++ fn append_all<I>(&mut self, iter: I)
+ where
+- T: ToTokens,
+- I: IntoIterator<Item = T>,
++ I: IntoIterator,
++ I::Item: ToTokens,
+ {
+ for token in iter {
+ token.to_tokens(self);
+ }
+ }
+
+- fn append_separated<T, I, U>(&mut self, iter: I, op: U)
++ fn append_separated<I, U>(&mut self, iter: I, op: U)
+ where
+- T: ToTokens,
+- I: IntoIterator<Item = T>,
++ I: IntoIterator,
++ I::Item: ToTokens,
+ U: ToTokens,
+ {
+ for (i, token) in iter.into_iter().enumerate() {
+@@ -90,10 +90,10 @@
+ }
+ }
+
+- fn append_terminated<T, I, U>(&mut self, iter: I, term: U)
++ fn append_terminated<I, U>(&mut self, iter: I, term: U)
+ where
+- T: ToTokens,
+- I: IntoIterator<Item = T>,
++ I: IntoIterator,
++ I::Item: ToTokens,
+ U: ToTokens,
+ {
+ for token in iter {
+diff --git a/third_party/rust/quote/src/format.rs b/third_party/rust/quote/src/format.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/src/format.rs
+@@ -0,0 +1,164 @@
++/// Formatting macro for constructing `Ident`s.
++///
++/// <br>
++///
++/// # Syntax
++///
++/// Syntax is copied from the [`format!`] macro, supporting both positional and
++/// named arguments.
++///
++/// Only a limited set of formatting traits are supported. The current mapping
++/// of format types to traits is:
++///
++/// * `{}` ⇒ [`IdentFragment`]
++/// * `{:o}` ⇒ [`Octal`](`std::fmt::Octal`)
++/// * `{:x}` ⇒ [`LowerHex`](`std::fmt::LowerHex`)
++/// * `{:X}` ⇒ [`UpperHex`](`std::fmt::UpperHex`)
++/// * `{:b}` ⇒ [`Binary`](`std::fmt::Binary`)
++///
++/// See [`std::fmt`] for more information.
++///
++/// <br>
++///
++/// # IdentFragment
++///
++/// Unlike `format!`, this macro uses the [`IdentFragment`] formatting trait by
++/// default. This trait is like `Display`, with a few differences:
++///
++/// * `IdentFragment` is only implemented for a limited set of types, such as
++/// unsigned integers and strings.
++/// * [`Ident`] arguments will have their `r#` prefixes stripped, if present.
++///
++/// [`Ident`]: `proc_macro2::Ident`
++///
++/// <br>
++///
++/// # Hygiene
++///
++/// The [`Span`] of the first `Ident` argument is used as the span of the final
++/// identifier, falling back to [`Span::call_site`] when no identifiers are
++/// provided.
++///
++/// ```
++/// # use quote::format_ident;
++/// # let ident = format_ident!("Ident");
++/// // If `ident` is an Ident, the span of `my_ident` will be inherited from it.
++/// let my_ident = format_ident!("My{}{}", ident, "IsCool");
++/// assert_eq!(my_ident, "MyIdentIsCool");
++/// ```
++///
++/// Alternatively, the span can be overridden by passing the `span` named
++/// argument.
++///
++/// ```
++/// # use quote::format_ident;
++/// # const IGNORE_TOKENS: &'static str = stringify! {
++/// let my_span = /* ... */;
++/// # };
++/// # let my_span = proc_macro2::Span::call_site();
++/// format_ident!("MyIdent", span = my_span);
++/// ```
++///
++/// [`Span`]: `proc_macro2::Span`
++/// [`Span::call_site`]: `proc_macro2::Span::call_site`
++///
++/// <p><br></p>
++///
++/// # Panics
++///
++/// This method will panic if the resulting formatted string is not a valid
++/// identifier.
++///
++/// <br>
++///
++/// # Examples
++///
++/// Composing raw and non-raw identifiers:
++/// ```
++/// # use quote::format_ident;
++/// let my_ident = format_ident!("My{}", "Ident");
++/// assert_eq!(my_ident, "MyIdent");
++///
++/// let raw = format_ident!("r#Raw");
++/// assert_eq!(raw, "r#Raw");
++///
++/// let my_ident_raw = format_ident!("{}Is{}", my_ident, raw);
++/// assert_eq!(my_ident_raw, "MyIdentIsRaw");
++/// ```
++///
++/// Integer formatting options:
++/// ```
++/// # use quote::format_ident;
++/// let num: u32 = 10;
++///
++/// let decimal = format_ident!("Id_{}", num);
++/// assert_eq!(decimal, "Id_10");
++///
++/// let octal = format_ident!("Id_{:o}", num);
++/// assert_eq!(octal, "Id_12");
++///
++/// let binary = format_ident!("Id_{:b}", num);
++/// assert_eq!(binary, "Id_1010");
++///
++/// let lower_hex = format_ident!("Id_{:x}", num);
++/// assert_eq!(lower_hex, "Id_a");
++///
++/// let upper_hex = format_ident!("Id_{:X}", num);
++/// assert_eq!(upper_hex, "Id_A");
++/// ```
++#[macro_export]
++macro_rules! format_ident {
++ ($fmt:expr) => {
++ $crate::format_ident_impl!([
++ ::std::option::Option::None,
++ $fmt
++ ])
++ };
++
++ ($fmt:expr, $($rest:tt)*) => {
++ $crate::format_ident_impl!([
++ ::std::option::Option::None,
++ $fmt
++ ] $($rest)*)
++ };
++}
++
++#[macro_export]
++#[doc(hidden)]
++macro_rules! format_ident_impl {
++ // Final state
++ ([$span:expr, $($fmt:tt)*]) => {
++ $crate::__rt::mk_ident(&format!($($fmt)*), $span)
++ };
++
++ // Span argument
++ ([$old:expr, $($fmt:tt)*] span = $span:expr) => {
++ $crate::format_ident_impl!([$old, $($fmt)*] span = $span,)
++ };
++ ([$old:expr, $($fmt:tt)*] span = $span:expr, $($rest:tt)*) => {
++ $crate::format_ident_impl!([
++ ::std::option::Option::Some::<$crate::__rt::Span>($span),
++ $($fmt)*
++ ] $($rest)*)
++ };
++
++ // Named argument
++ ([$span:expr, $($fmt:tt)*] $name:ident = $arg:expr) => {
++ $crate::format_ident_impl!([$span, $($fmt)*] $name = $arg,)
++ };
++ ([$span:expr, $($fmt:tt)*] $name:ident = $arg:expr, $($rest:tt)*) => {
++ match $crate::__rt::IdentFragmentAdapter(&$arg) {
++ arg => $crate::format_ident_impl!([$span.or(arg.span()), $($fmt)*, $name = arg] $($rest)*),
++ }
++ };
++
++ // Positional argument
++ ([$span:expr, $($fmt:tt)*] $arg:expr) => {
++ $crate::format_ident_impl!([$span, $($fmt)*] $arg,)
++ };
++ ([$span:expr, $($fmt:tt)*] $arg:expr, $($rest:tt)*) => {
++ match $crate::__rt::IdentFragmentAdapter(&$arg) {
++ arg => $crate::format_ident_impl!([$span.or(arg.span()), $($fmt)*, arg] $($rest)*),
++ }
++ };
++}
+diff --git a/third_party/rust/quote/src/ident_fragment.rs b/third_party/rust/quote/src/ident_fragment.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/src/ident_fragment.rs
+@@ -0,0 +1,72 @@
++use proc_macro2::{Ident, Span};
++use std::fmt;
++
++/// Specialized formatting trait used by `format_ident!`.
++///
++/// [`Ident`] arguments formatted using this trait will have their `r#` prefix
++/// stripped, if present.
++///
++/// See [`format_ident!`] for more information.
++pub trait IdentFragment {
++ /// Format this value as an identifier fragment.
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result;
++
++ /// Span associated with this `IdentFragment`.
++ ///
++ /// If non-`None`, may be inherited by formatted identifiers.
++ fn span(&self) -> Option<Span> {
++ None
++ }
++}
++
++impl<'a, T: IdentFragment + ?Sized> IdentFragment for &'a T {
++ fn span(&self) -> Option<Span> {
++ <T as IdentFragment>::span(*self)
++ }
++
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ IdentFragment::fmt(*self, f)
++ }
++}
++
++impl<'a, T: IdentFragment + ?Sized> IdentFragment for &'a mut T {
++ fn span(&self) -> Option<Span> {
++ <T as IdentFragment>::span(*self)
++ }
++
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ IdentFragment::fmt(*self, f)
++ }
++}
++
++impl IdentFragment for Ident {
++ fn span(&self) -> Option<Span> {
++ Some(self.span())
++ }
++
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ let id = self.to_string();
++ if id.starts_with("r#") {
++ fmt::Display::fmt(&id[2..], f)
++ } else {
++ fmt::Display::fmt(&id[..], f)
++ }
++ }
++}
++
++// Limited set of types which this is implemented for, as we want to avoid types
++// which will often include non-identifier characters in their `Display` impl.
++macro_rules! ident_fragment_display {
++ ($($T:ty),*) => {
++ $(
++ impl IdentFragment for $T {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ fmt::Display::fmt(self, f)
++ }
++ }
++ )*
++ }
++}
++
++ident_fragment_display!(bool, str, String);
++ident_fragment_display!(u8, u16, u32, u64, u128, usize);
+diff --git a/third_party/rust/quote/src/lib.rs b/third_party/rust/quote/src/lib.rs
+--- a/third_party/rust/quote/src/lib.rs
++++ b/third_party/rust/quote/src/lib.rs
+@@ -6,8 +6,8 @@
+ //! Procedural macros in Rust receive a stream of tokens as input, execute
+ //! arbitrary Rust code to determine how to manipulate those tokens, and produce
+ //! a stream of tokens to hand back to the compiler to compile into the caller's
+-//! crate. Quasi-quoting is a solution to one piece of that -- producing tokens
+-//! to return to the compiler.
++//! crate. Quasi-quoting is a solution to one piece of that &mdash; producing
++//! tokens to return to the compiler.
+ //!
+ //! The idea of quasi-quoting is that we write *code* that we treat as *data*.
+ //! Within the `quote!` macro, we can write what looks like code to our text
+@@ -21,14 +21,13 @@
+ //! general-purpose Rust quasi-quoting library and is not specific to procedural
+ //! macros.
+ //!
+-//! *Version requirement: Quote supports any compiler version back to Rust's
+-//! very first support for procedural macros in Rust 1.15.0.*
+-//!
+ //! ```toml
+ //! [dependencies]
+-//! quote = "0.6"
++//! quote = "1.0"
+ //! ```
+ //!
++//! <br>
++//!
+ //! # Example
+ //!
+ //! The following quasi-quoted block of code is something you might find in [a]
+@@ -41,7 +40,7 @@
+ //! [a]: https://serde.rs/
+ //! [`quote_spanned!`]: macro.quote_spanned.html
+ //!
+-//! ```edition2018
++//! ```
+ //! # use quote::quote;
+ //! #
+ //! # let generics = "";
+@@ -72,164 +71,45 @@
+ //! }
+ //! };
+ //! ```
+-//!
+-//! # Recursion limit
+-//!
+-//! The `quote!` macro relies on deep recursion so some large invocations may
+-//! fail with "recursion limit reached" when you compile. If it fails, bump up
+-//! the recursion limit by adding `#![recursion_limit = "128"]` to your crate.
+-//! An even higher limit may be necessary for especially large invocations.
+
+ // Quote types in rustdoc of other crates get linked to here.
+-#![doc(html_root_url = "https://docs.rs/quote/0.6.11")]
++#![doc(html_root_url = "https://docs.rs/quote/1.0.2")]
+
+ #[cfg(all(
+ not(all(target_arch = "wasm32", target_os = "unknown")),
+ feature = "proc-macro"
+ ))]
+ extern crate proc_macro;
+-extern crate proc_macro2;
+
+ mod ext;
+-pub use ext::TokenStreamExt;
+-
++mod format;
++mod ident_fragment;
+ mod to_tokens;
+-pub use to_tokens::ToTokens;
+
+ // Not public API.
+ #[doc(hidden)]
+-pub mod __rt {
+- use ext::TokenStreamExt;
+- pub use proc_macro2::*;
+-
+- fn is_ident_start(c: u8) -> bool {
+- (b'a' <= c && c <= b'z') || (b'A' <= c && c <= b'Z') || c == b'_'
+- }
+-
+- fn is_ident_continue(c: u8) -> bool {
+- (b'a' <= c && c <= b'z')
+- || (b'A' <= c && c <= b'Z')
+- || c == b'_'
+- || (b'0' <= c && c <= b'9')
+- }
+-
+- fn is_ident(token: &str) -> bool {
+- if token.bytes().all(|digit| digit >= b'0' && digit <= b'9') {
+- return false;
+- }
+-
+- let mut bytes = token.bytes();
+- let first = bytes.next().unwrap();
+- if !is_ident_start(first) {
+- return false;
+- }
+- for ch in bytes {
+- if !is_ident_continue(ch) {
+- return false;
+- }
+- }
+- true
+- }
+-
+- pub fn parse(tokens: &mut TokenStream, span: Span, s: &str) {
+- if is_ident(s) {
+- // Fast path, since idents are the most common token.
+- tokens.append(Ident::new(s, span));
+- } else {
+- let s: TokenStream = s.parse().expect("invalid token stream");
+- tokens.extend(s.into_iter().map(|mut t| {
+- t.set_span(span);
+- t
+- }));
+- }
+- }
++#[path = "runtime.rs"]
++pub mod __rt;
+
+- macro_rules! push_punct {
+- ($name:ident $char1:tt) => {
+- pub fn $name(tokens: &mut TokenStream, span: Span) {
+- let mut punct = Punct::new($char1, Spacing::Alone);
+- punct.set_span(span);
+- tokens.append(punct);
+- }
+- };
+- ($name:ident $char1:tt $char2:tt) => {
+- pub fn $name(tokens: &mut TokenStream, span: Span) {
+- let mut punct = Punct::new($char1, Spacing::Joint);
+- punct.set_span(span);
+- tokens.append(punct);
+- let mut punct = Punct::new($char2, Spacing::Alone);
+- punct.set_span(span);
+- tokens.append(punct);
+- }
+- };
+- ($name:ident $char1:tt $char2:tt $char3:tt) => {
+- pub fn $name(tokens: &mut TokenStream, span: Span) {
+- let mut punct = Punct::new($char1, Spacing::Joint);
+- punct.set_span(span);
+- tokens.append(punct);
+- let mut punct = Punct::new($char2, Spacing::Joint);
+- punct.set_span(span);
+- tokens.append(punct);
+- let mut punct = Punct::new($char3, Spacing::Alone);
+- punct.set_span(span);
+- tokens.append(punct);
+- }
+- };
+- }
++pub use crate::ext::TokenStreamExt;
++pub use crate::ident_fragment::IdentFragment;
++pub use crate::to_tokens::ToTokens;
+
+- push_punct!(push_add '+');
+- push_punct!(push_add_eq '+' '=');
+- push_punct!(push_and '&');
+- push_punct!(push_and_and '&' '&');
+- push_punct!(push_and_eq '&' '=');
+- push_punct!(push_at '@');
+- push_punct!(push_bang '!');
+- push_punct!(push_caret '^');
+- push_punct!(push_caret_eq '^' '=');
+- push_punct!(push_colon ':');
+- push_punct!(push_colon2 ':' ':');
+- push_punct!(push_comma ',');
+- push_punct!(push_div '/');
+- push_punct!(push_div_eq '/' '=');
+- push_punct!(push_dot '.');
+- push_punct!(push_dot2 '.' '.');
+- push_punct!(push_dot3 '.' '.' '.');
+- push_punct!(push_dot_dot_eq '.' '.' '=');
+- push_punct!(push_eq '=');
+- push_punct!(push_eq_eq '=' '=');
+- push_punct!(push_ge '>' '=');
+- push_punct!(push_gt '>');
+- push_punct!(push_le '<' '=');
+- push_punct!(push_lt '<');
+- push_punct!(push_mul_eq '*' '=');
+- push_punct!(push_ne '!' '=');
+- push_punct!(push_or '|');
+- push_punct!(push_or_eq '|' '=');
+- push_punct!(push_or_or '|' '|');
+- push_punct!(push_pound '#');
+- push_punct!(push_question '?');
+- push_punct!(push_rarrow '-' '>');
+- push_punct!(push_larrow '<' '-');
+- push_punct!(push_rem '%');
+- push_punct!(push_rem_eq '%' '=');
+- push_punct!(push_fat_arrow '=' '>');
+- push_punct!(push_semi ';');
+- push_punct!(push_shl '<' '<');
+- push_punct!(push_shl_eq '<' '<' '=');
+- push_punct!(push_shr '>' '>');
+- push_punct!(push_shr_eq '>' '>' '=');
+- push_punct!(push_star '*');
+- push_punct!(push_sub '-');
+- push_punct!(push_sub_eq '-' '=');
+-}
++// Not public API.
++#[doc(hidden)]
++pub mod spanned;
+
+ /// The whole point.
+ ///
+ /// Performs variable interpolation against the input and produces it as
+-/// [`TokenStream`]. For returning tokens to the compiler in a procedural macro, use
+-/// `into()` to build a `TokenStream`.
++/// [`proc_macro2::TokenStream`].
+ ///
+-/// [`TokenStream`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.TokenStream.html
++/// Note: for returning tokens to the compiler in a procedural macro, use
++/// `.into()` on the result to convert to [`proc_macro::TokenStream`].
++///
++/// [`TokenStream`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.TokenStream.html
++///
++/// <br>
+ ///
+ /// # Interpolation
+ ///
+@@ -246,26 +126,30 @@
+ /// Repetition is done using `#(...)*` or `#(...),*` again similar to
+ /// `macro_rules!`. This iterates through the elements of any variable
+ /// interpolated within the repetition and inserts a copy of the repetition body
+-/// for each one. The variables in an interpolation may be anything that
+-/// implements `IntoIterator`, including `Vec` or a pre-existing iterator.
++/// for each one. The variables in an interpolation may be a `Vec`, slice,
++/// `BTreeSet`, or any `Iterator`.
+ ///
+ /// - `#(#var)*` — no separators
+ /// - `#(#var),*` — the character before the asterisk is used as a separator
+ /// - `#( struct #var; )*` — the repetition can contain other tokens
+ /// - `#( #k => println!("{}", #v), )*` — even multiple interpolations
+ ///
++/// <br>
++///
+ /// # Hygiene
+ ///
+ /// Any interpolated tokens preserve the `Span` information provided by their
+ /// `ToTokens` implementation. Tokens that originate within the `quote!`
+ /// invocation are spanned with [`Span::call_site()`].
+ ///
+-/// [`Span::call_site()`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html#method.call_site
++/// [`Span::call_site()`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html#method.call_site
+ ///
+ /// A different span can be provided through the [`quote_spanned!`] macro.
+ ///
+ /// [`quote_spanned!`]: macro.quote_spanned.html
+ ///
++/// <br>
++///
+ /// # Return type
+ ///
+ /// The macro evaluates to an expression of type `proc_macro2::TokenStream`.
+@@ -286,9 +170,11 @@
+ ///
+ /// [`From`]: https://doc.rust-lang.org/std/convert/trait.From.html
+ ///
++/// <br>
++///
+ /// # Examples
+ ///
+-/// ## Procedural macro
++/// ### Procedural macro
+ ///
+ /// The structure of a basic procedural macro is as follows. Refer to the [Syn]
+ /// crate for further useful guidance on using `quote!` as part of a procedural
+@@ -296,12 +182,14 @@
+ ///
+ /// [Syn]: https://github.com/dtolnay/syn
+ ///
+-/// ```edition2018
++/// ```
+ /// # #[cfg(any())]
+ /// extern crate proc_macro;
+-/// # use proc_macro2 as proc_macro;
++/// # extern crate proc_macro2;
+ ///
++/// # #[cfg(any())]
+ /// use proc_macro::TokenStream;
++/// # use proc_macro2::TokenStream;
+ /// use quote::quote;
+ ///
+ /// # const IGNORE_TOKENS: &'static str = stringify! {
+@@ -331,14 +219,16 @@
+ /// }
+ /// ```
+ ///
+-/// ## Combining quoted fragments
++/// <p><br></p>
++///
++/// ### Combining quoted fragments
+ ///
+ /// Usually you don't end up constructing an entire final `TokenStream` in one
+ /// piece. Different parts may come from different helper functions. The tokens
+ /// produced by `quote!` themselves implement `ToTokens` and so can be
+ /// interpolated into later `quote!` invocations to build up a final result.
+ ///
+-/// ```edition2018
++/// ```
+ /// # use quote::quote;
+ /// #
+ /// let type_definition = quote! {...};
+@@ -350,7 +240,9 @@
+ /// };
+ /// ```
+ ///
+-/// ## Constructing identifiers
++/// <p><br></p>
++///
++/// ### Constructing identifiers
+ ///
+ /// Suppose we have an identifier `ident` which came from somewhere in a macro
+ /// input and we need to modify it in some way for the macro output. Let's
+@@ -360,7 +252,7 @@
+ /// behavior of concatenating them. The underscore and the identifier will
+ /// continue to be two separate tokens as if you had written `_ x`.
+ ///
+-/// ```edition2018
++/// ```
+ /// # use proc_macro2::{self as syn, Span};
+ /// # use quote::quote;
+ /// #
+@@ -373,10 +265,28 @@
+ /// # ;
+ /// ```
+ ///
+-/// The solution is to perform token-level manipulations using the APIs provided
+-/// by Syn and proc-macro2.
++/// The solution is to build a new identifier token with the correct value. As
++/// this is such a common case, the [`format_ident!`] macro provides a
++/// convenient utility for doing so correctly.
+ ///
+-/// ```edition2018
++/// ```
++/// # use proc_macro2::{Ident, Span};
++/// # use quote::{format_ident, quote};
++/// #
++/// # let ident = Ident::new("i", Span::call_site());
++/// #
++/// let varname = format_ident!("_{}", ident);
++/// quote! {
++/// let mut #varname = 0;
++/// }
++/// # ;
++/// ```
++///
++/// Alternatively, the APIs provided by Syn and proc-macro2 can be used to
++/// directly build the identifier. This is roughly equivalent to the above, but
++/// will not handle `ident` being a raw identifier.
++///
++/// ```
+ /// # use proc_macro2::{self as syn, Span};
+ /// # use quote::quote;
+ /// #
+@@ -390,13 +300,15 @@
+ /// # ;
+ /// ```
+ ///
+-/// ## Making method calls
++/// <p><br></p>
++///
++/// ### Making method calls
+ ///
+ /// Let's say our macro requires some type specified in the macro input to have
+ /// a constructor called `new`. We have the type in a variable called
+ /// `field_type` of type `syn::Type` and want to invoke the constructor.
+ ///
+-/// ```edition2018
++/// ```
+ /// # use quote::quote;
+ /// #
+ /// # let field_type = quote!(...);
+@@ -414,7 +326,7 @@
+ /// syntax. Ordinarily in handwritten Rust we would write `Vec::<i32>::new()`
+ /// but for macros often the following is more convenient.
+ ///
+-/// ```edition2018
++/// ```
+ /// # use quote::quote;
+ /// #
+ /// # let field_type = quote!(...);
+@@ -429,7 +341,7 @@
+ ///
+ /// A similar pattern is appropriate for trait methods.
+ ///
+-/// ```edition2018
++/// ```
+ /// # use quote::quote;
+ /// #
+ /// # let field_type = quote!(...);
+@@ -439,23 +351,137 @@
+ /// }
+ /// # ;
+ /// ```
+-#[macro_export(local_inner_macros)]
++///
++/// <p><br></p>
++///
++/// ### Interpolating text inside of doc comments
++///
++/// Neither doc comments nor string literals get interpolation behavior in
++/// quote:
++///
++/// ```compile_fail
++/// quote! {
++/// /// try to interpolate: #ident
++/// ///
++/// /// ...
++/// }
++/// ```
++///
++/// ```compile_fail
++/// quote! {
++/// #[doc = "try to interpolate: #ident"]
++/// }
++/// ```
++///
++/// Macro calls in a doc attribute are not valid syntax:
++///
++/// ```compile_fail
++/// quote! {
++/// #[doc = concat!("try to interpolate: ", stringify!(#ident))]
++/// }
++/// ```
++///
++/// Instead the best way to build doc comments that involve variables is by
++/// formatting the doc string literal outside of quote.
++///
++/// ```rust
++/// # use proc_macro2::{Ident, Span};
++/// # use quote::quote;
++/// #
++/// # const IGNORE: &str = stringify! {
++/// let msg = format!(...);
++/// # };
++/// #
++/// # let ident = Ident::new("var", Span::call_site());
++/// # let msg = format!("try to interpolate: {}", ident);
++/// quote! {
++/// #[doc = #msg]
++/// ///
++/// /// ...
++/// }
++/// # ;
++/// ```
++///
++/// <p><br></p>
++///
++/// ### Indexing into a tuple struct
++///
++/// When interpolating indices of a tuple or tuple struct, we need them not to
++/// appears suffixed as integer literals by interpolating them as [`syn::Index`]
++/// instead.
++///
++/// [`syn::Index`]: https://docs.rs/syn/1.0/syn/struct.Index.html
++///
++/// ```compile_fail
++/// let i = 0usize..self.fields.len();
++///
++/// // expands to 0 + self.0usize.heap_size() + self.1usize.heap_size() + ...
++/// // which is not valid syntax
++/// quote! {
++/// 0 #( + self.#i.heap_size() )*
++/// }
++/// ```
++///
++/// ```
++/// # use proc_macro2::{Ident, TokenStream};
++/// # use quote::quote;
++/// #
++/// # mod syn {
++/// # use proc_macro2::{Literal, TokenStream};
++/// # use quote::{ToTokens, TokenStreamExt};
++/// #
++/// # pub struct Index(usize);
++/// #
++/// # impl From<usize> for Index {
++/// # fn from(i: usize) -> Self {
++/// # Index(i)
++/// # }
++/// # }
++/// #
++/// # impl ToTokens for Index {
++/// # fn to_tokens(&self, tokens: &mut TokenStream) {
++/// # tokens.append(Literal::usize_unsuffixed(self.0));
++/// # }
++/// # }
++/// # }
++/// #
++/// # struct Struct {
++/// # fields: Vec<Ident>,
++/// # }
++/// #
++/// # impl Struct {
++/// # fn example(&self) -> TokenStream {
++/// let i = (0..self.fields.len()).map(syn::Index::from);
++///
++/// // expands to 0 + self.0.heap_size() + self.1.heap_size() + ...
++/// quote! {
++/// 0 #( + self.#i.heap_size() )*
++/// }
++/// # }
++/// # }
++/// ```
++#[macro_export]
+ macro_rules! quote {
+- ($($tt:tt)*) => (quote_spanned!($crate::__rt::Span::call_site()=> $($tt)*));
++ ($($tt:tt)*) => {
++ $crate::quote_spanned!($crate::__rt::Span::call_site()=> $($tt)*)
++ };
+ }
+
+ /// Same as `quote!`, but applies a given span to all tokens originating within
+ /// the macro invocation.
+ ///
++/// <br>
++///
+ /// # Syntax
+ ///
+ /// A span expression of type [`Span`], followed by `=>`, followed by the tokens
+-/// to quote. The span expression should be brief -- use a variable for anything
+-/// more than a few characters. There should be no space before the `=>` token.
++/// to quote. The span expression should be brief &mdash; use a variable for
++/// anything more than a few characters. There should be no space before the
++/// `=>` token.
+ ///
+-/// [`Span`]: https://docs.rs/proc-macro2/0.4/proc_macro2/struct.Span.html
++/// [`Span`]: https://docs.rs/proc-macro2/1.0/proc_macro2/struct.Span.html
+ ///
+-/// ```edition2018
++/// ```
+ /// # use proc_macro2::Span;
+ /// # use quote::quote_spanned;
+ /// #
+@@ -480,12 +506,16 @@
+ /// being evaluated in the context of the procedural macro and the remaining
+ /// tokens being evaluated in the generated code.
+ ///
++/// <br>
++///
+ /// # Hygiene
+ ///
+ /// Any interpolated tokens preserve the `Span` information provided by their
+ /// `ToTokens` implementation. Tokens that originate within the `quote_spanned!`
+ /// invocation are spanned with the given span argument.
+ ///
++/// <br>
++///
+ /// # Example
+ ///
+ /// The following procedural macro code uses `quote_spanned!` to assert that a
+@@ -494,7 +524,7 @@
+ ///
+ /// [`Sync`]: https://doc.rust-lang.org/std/marker/trait.Sync.html
+ ///
+-/// ```edition2018
++/// ```
+ /// # use quote::{quote_spanned, TokenStreamExt, ToTokens};
+ /// # use proc_macro2::{Span, TokenStream};
+ /// #
+@@ -532,438 +562,387 @@
+ ///
+ /// In this example it is important for the where-clause to be spanned with the
+ /// line/column information of the user's input type so that error messages are
+-/// placed appropriately by the compiler. But it is also incredibly important
+-/// that `Sync` resolves at the macro definition site and not the macro call
+-/// site. If we resolve `Sync` at the same span that the user's type is going to
+-/// be resolved, then they could bypass our check by defining their own trait
+-/// named `Sync` that is implemented for their type.
+-#[macro_export(local_inner_macros)]
++/// placed appropriately by the compiler.
++#[macro_export]
+ macro_rules! quote_spanned {
+- ($span:expr=> $($tt:tt)*) => {
+- {
+- let mut _s = $crate::__rt::TokenStream::new();
+- let _span = $span;
+- quote_each_token!(_s _span $($tt)*);
+- _s
+- }
++ ($span:expr=> $($tt:tt)*) => {{
++ let mut _s = $crate::__rt::TokenStream::new();
++ let _span: $crate::__rt::Span = $span;
++ $crate::quote_each_token!(_s _span $($tt)*);
++ _s
++ }};
++}
++
++// Extract the names of all #metavariables and pass them to the $call macro.
++//
++// in: pounded_var_names!(then!(...) a #b c #( #d )* #e)
++// out: then!(... b);
++// then!(... d);
++// then!(... e);
++#[macro_export]
++#[doc(hidden)]
++macro_rules! pounded_var_names {
++ ($call:ident! $extra:tt $($tts:tt)*) => {
++ $crate::pounded_var_names_with_context!($call! $extra
++ (@ $($tts)*)
++ ($($tts)* @)
++ )
++ };
++}
++
++#[macro_export]
++#[doc(hidden)]
++macro_rules! pounded_var_names_with_context {
++ ($call:ident! $extra:tt ($($b1:tt)*) ($($curr:tt)*)) => {
++ $(
++ $crate::pounded_var_with_context!($call! $extra $b1 $curr);
++ )*
+ };
+ }
+
+-// Extract the names of all #metavariables and pass them to the $finish macro.
+-//
+-// in: pounded_var_names!(then () a #b c #( #d )* #e)
+-// out: then!(() b d e)
+-#[macro_export(local_inner_macros)]
++#[macro_export]
+ #[doc(hidden)]
+-macro_rules! pounded_var_names {
+- ($finish:ident ($($found:ident)*) # ( $($inner:tt)* ) $($rest:tt)*) => {
+- pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
++macro_rules! pounded_var_with_context {
++ ($call:ident! $extra:tt $b1:tt ( $($inner:tt)* )) => {
++ $crate::pounded_var_names!($call! $extra $($inner)*);
+ };
+
+- ($finish:ident ($($found:ident)*) # [ $($inner:tt)* ] $($rest:tt)*) => {
+- pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
++ ($call:ident! $extra:tt $b1:tt [ $($inner:tt)* ]) => {
++ $crate::pounded_var_names!($call! $extra $($inner)*);
+ };
+
+- ($finish:ident ($($found:ident)*) # { $($inner:tt)* } $($rest:tt)*) => {
+- pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
++ ($call:ident! $extra:tt $b1:tt { $($inner:tt)* }) => {
++ $crate::pounded_var_names!($call! $extra $($inner)*);
++ };
++
++ ($call:ident!($($extra:tt)*) # $var:ident) => {
++ $crate::$call!($($extra)* $var);
+ };
+
+- ($finish:ident ($($found:ident)*) # $first:ident $($rest:tt)*) => {
+- pounded_var_names!($finish ($($found)* $first) $($rest)*)
+- };
++ ($call:ident! $extra:tt $b1:tt $curr:tt) => {};
++}
+
+- ($finish:ident ($($found:ident)*) ( $($inner:tt)* ) $($rest:tt)*) => {
+- pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
+- };
+-
+- ($finish:ident ($($found:ident)*) [ $($inner:tt)* ] $($rest:tt)*) => {
+- pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
++#[macro_export]
++#[doc(hidden)]
++macro_rules! quote_bind_into_iter {
++ ($has_iter:ident $var:ident) => {
++ // `mut` may be unused if $var occurs multiple times in the list.
++ #[allow(unused_mut)]
++ let (mut $var, i) = $var.quote_into_iter();
++ let $has_iter = $has_iter | i;
+ };
+-
+- ($finish:ident ($($found:ident)*) { $($inner:tt)* } $($rest:tt)*) => {
+- pounded_var_names!($finish ($($found)*) $($inner)* $($rest)*)
+- };
++}
+
+- ($finish:ident ($($found:ident)*) $ignore:tt $($rest:tt)*) => {
+- pounded_var_names!($finish ($($found)*) $($rest)*)
+- };
+-
+- ($finish:ident ($($found:ident)*)) => {
+- $finish!(() $($found)*)
++#[macro_export]
++#[doc(hidden)]
++macro_rules! quote_bind_next_or_break {
++ ($var:ident) => {
++ let $var = match $var.next() {
++ Some(_x) => $crate::__rt::RepInterp(_x),
++ None => break,
++ };
+ };
+ }
+
+-// in: nested_tuples_pat!(() a b c d e)
+-// out: ((((a b) c) d) e)
+-//
+-// in: nested_tuples_pat!(() a)
+-// out: a
+-#[macro_export(local_inner_macros)]
++#[macro_export]
+ #[doc(hidden)]
+-macro_rules! nested_tuples_pat {
+- (()) => {
+- &()
++macro_rules! quote_each_token {
++ ($tokens:ident $span:ident $($tts:tt)*) => {
++ $crate::quote_tokens_with_context!($tokens $span
++ (@ @ @ @ @ @ $($tts)*)
++ (@ @ @ @ @ $($tts)* @)
++ (@ @ @ @ $($tts)* @ @)
++ (@ @ @ $(($tts))* @ @ @)
++ (@ @ $($tts)* @ @ @ @)
++ (@ $($tts)* @ @ @ @ @)
++ ($($tts)* @ @ @ @ @ @)
++ );
+ };
+-
+- (() $first:ident $($rest:ident)*) => {
+- nested_tuples_pat!(($first) $($rest)*)
+- };
++}
+
+- (($pat:pat) $first:ident $($rest:ident)*) => {
+- nested_tuples_pat!((($pat, $first)) $($rest)*)
+- };
+-
+- (($done:pat)) => {
+- $done
++#[macro_export]
++#[doc(hidden)]
++macro_rules! quote_tokens_with_context {
++ ($tokens:ident $span:ident
++ ($($b3:tt)*) ($($b2:tt)*) ($($b1:tt)*)
++ ($($curr:tt)*)
++ ($($a1:tt)*) ($($a2:tt)*) ($($a3:tt)*)
++ ) => {
++ $(
++ $crate::quote_token_with_context!($tokens $span $b3 $b2 $b1 $curr $a1 $a2 $a3);
++ )*
+ };
+ }
+
+-// in: multi_zip_expr!(() a b c d e)
+-// out: a.into_iter().zip(b).zip(c).zip(d).zip(e)
+-//
+-// in: multi_zip_iter!(() a)
+-// out: a
+-#[macro_export(local_inner_macros)]
++#[macro_export]
+ #[doc(hidden)]
+-macro_rules! multi_zip_expr {
+- (()) => {
+- &[]
+- };
++macro_rules! quote_token_with_context {
++ ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt @ $a1:tt $a2:tt $a3:tt) => {};
++
++ ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) * $a3:tt) => {{
++ use $crate::__rt::ext::*;
++ let has_iter = $crate::__rt::ThereIsNoIteratorInRepetition;
++ $crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*);
++ let _: $crate::__rt::HasIterator = has_iter;
++ // This is `while true` instead of `loop` because if there are no
++ // iterators used inside of this repetition then the body would not
++ // contain any `break`, so the compiler would emit unreachable code
++ // warnings on anything below the loop. We use has_iter to detect and
++ // fail to compile when there are no iterators, so here we just work
++ // around the unneeded extra warning.
++ while true {
++ $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*);
++ $crate::quote_each_token!($tokens $span $($inner)*);
++ }
++ }};
++ ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) * $a2:tt $a3:tt) => {};
++ ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) (*) $a1:tt $a2:tt $a3:tt) => {};
+
+- (() $single:ident) => {
+- $single
+- };
+-
+- (() $first:ident $($rest:ident)*) => {
+- multi_zip_expr!(($first.into_iter()) $($rest)*)
++ ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) ( $($inner:tt)* ) $sep:tt *) => {{
++ use $crate::__rt::ext::*;
++ let mut _i = 0usize;
++ let has_iter = $crate::__rt::ThereIsNoIteratorInRepetition;
++ $crate::pounded_var_names!(quote_bind_into_iter!(has_iter) () $($inner)*);
++ let _: $crate::__rt::HasIterator = has_iter;
++ while true {
++ $crate::pounded_var_names!(quote_bind_next_or_break!() () $($inner)*);
++ if _i > 0 {
++ $crate::quote_token!($tokens $span $sep);
++ }
++ _i += 1;
++ $crate::quote_each_token!($tokens $span $($inner)*);
++ }
++ }};
++ ($tokens:ident $span:ident $b3:tt $b2:tt # (( $($inner:tt)* )) $sep:tt * $a3:tt) => {};
++ ($tokens:ident $span:ident $b3:tt # ( $($inner:tt)* ) ($sep:tt) * $a2:tt $a3:tt) => {};
++ ($tokens:ident $span:ident # ( $($inner:tt)* ) * (*) $a1:tt $a2:tt $a3:tt) => {
++ // https://github.com/dtolnay/quote/issues/130
++ $crate::quote_token!($tokens $span *);
+ };
++ ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt (*) $a1:tt $a2:tt $a3:tt) => {};
+
+- (($zips:expr) $first:ident $($rest:ident)*) => {
+- multi_zip_expr!(($zips.zip($first)) $($rest)*)
++ ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt (#) $var:ident $a2:tt $a3:tt) => {
++ $crate::ToTokens::to_tokens(&$var, &mut $tokens);
+ };
+-
+- (($done:expr)) => {
+- $done
++ ($tokens:ident $span:ident $b3:tt $b2:tt # ($var:ident) $a1:tt $a2:tt $a3:tt) => {};
++ ($tokens:ident $span:ident $b3:tt $b2:tt $b1:tt ($curr:tt) $a1:tt $a2:tt $a3:tt) => {
++ $crate::quote_token!($tokens $span $curr);
+ };
+ }
+
+-#[macro_export(local_inner_macros)]
++#[macro_export]
+ #[doc(hidden)]
+-macro_rules! quote_each_token {
+- ($tokens:ident $span:ident) => {};
+-
+- ($tokens:ident $span:ident # ! $($rest:tt)*) => {
+- quote_each_token!($tokens $span #);
+- quote_each_token!($tokens $span !);
+- quote_each_token!($tokens $span $($rest)*);
+- };
+-
+- ($tokens:ident $span:ident # ( $($inner:tt)* ) * $($rest:tt)*) => {
+- for pounded_var_names!(nested_tuples_pat () $($inner)*)
+- in pounded_var_names!(multi_zip_expr () $($inner)*) {
+- quote_each_token!($tokens $span $($inner)*);
+- }
+- quote_each_token!($tokens $span $($rest)*);
+- };
+-
+- ($tokens:ident $span:ident # ( $($inner:tt)* ) $sep:tt * $($rest:tt)*) => {
+- for (_i, pounded_var_names!(nested_tuples_pat () $($inner)*))
+- in pounded_var_names!(multi_zip_expr () $($inner)*).into_iter().enumerate() {
+- if _i > 0 {
+- quote_each_token!($tokens $span $sep);
+- }
+- quote_each_token!($tokens $span $($inner)*);
+- }
+- quote_each_token!($tokens $span $($rest)*);
+- };
+-
+- ($tokens:ident $span:ident # [ $($inner:tt)* ] $($rest:tt)*) => {
+- quote_each_token!($tokens $span #);
++macro_rules! quote_token {
++ ($tokens:ident $span:ident ( $($inner:tt)* )) => {
+ $tokens.extend({
+ let mut g = $crate::__rt::Group::new(
+- $crate::__rt::Delimiter::Bracket,
+- quote_spanned!($span=> $($inner)*),
++ $crate::__rt::Delimiter::Parenthesis,
++ $crate::quote_spanned!($span=> $($inner)*),
+ );
+ g.set_span($span);
+ Some($crate::__rt::TokenTree::from(g))
+ });
+- quote_each_token!($tokens $span $($rest)*);
+- };
+-
+- ($tokens:ident $span:ident # $first:ident $($rest:tt)*) => {
+- $crate::ToTokens::to_tokens(&$first, &mut $tokens);
+- quote_each_token!($tokens $span $($rest)*);
+ };
+
+- ($tokens:ident $span:ident ( $($first:tt)* ) $($rest:tt)*) => {
+- $tokens.extend({
+- let mut g = $crate::__rt::Group::new(
+- $crate::__rt::Delimiter::Parenthesis,
+- quote_spanned!($span=> $($first)*),
+- );
+- g.set_span($span);
+- Some($crate::__rt::TokenTree::from(g))
+- });
+- quote_each_token!($tokens $span $($rest)*);
+- };
+-
+- ($tokens:ident $span:ident [ $($first:tt)* ] $($rest:tt)*) => {
++ ($tokens:ident $span:ident [ $($inner:tt)* ]) => {
+ $tokens.extend({
+ let mut g = $crate::__rt::Group::new(
+ $crate::__rt::Delimiter::Bracket,
+- quote_spanned!($span=> $($first)*),
+- );
+- g.set_span($span);
+- Some($crate::__rt::TokenTree::from(g))
+- });
+- quote_each_token!($tokens $span $($rest)*);
+- };
+-
+- ($tokens:ident $span:ident { $($first:tt)* } $($rest:tt)*) => {
+- $tokens.extend({
+- let mut g = $crate::__rt::Group::new(
+- $crate::__rt::Delimiter::Brace,
+- quote_spanned!($span=> $($first)*),
++ $crate::quote_spanned!($span=> $($inner)*),
+ );
+ g.set_span($span);
+ Some($crate::__rt::TokenTree::from(g))
+ });
+- quote_each_token!($tokens $span $($rest)*);
+- };
+-
+- ($tokens:ident $span:ident + $($rest:tt)*) => {
+- $crate::__rt::push_add(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
+- };
+-
+- ($tokens:ident $span:ident += $($rest:tt)*) => {
+- $crate::__rt::push_add_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
+ };
+
+- ($tokens:ident $span:ident & $($rest:tt)*) => {
+- $crate::__rt::push_and(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident { $($inner:tt)* }) => {
++ $tokens.extend({
++ let mut g = $crate::__rt::Group::new(
++ $crate::__rt::Delimiter::Brace,
++ $crate::quote_spanned!($span=> $($inner)*),
++ );
++ g.set_span($span);
++ Some($crate::__rt::TokenTree::from(g))
++ });
+ };
+
+- ($tokens:ident $span:ident && $($rest:tt)*) => {
+- $crate::__rt::push_and_and(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident +) => {
++ $crate::__rt::push_add(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident &= $($rest:tt)*) => {
+- $crate::__rt::push_and_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident +=) => {
++ $crate::__rt::push_add_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident @ $($rest:tt)*) => {
+- $crate::__rt::push_at(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident &) => {
++ $crate::__rt::push_and(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident ! $($rest:tt)*) => {
+- $crate::__rt::push_bang(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident &&) => {
++ $crate::__rt::push_and_and(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident ^ $($rest:tt)*) => {
+- $crate::__rt::push_caret(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident &=) => {
++ $crate::__rt::push_and_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident ^= $($rest:tt)*) => {
+- $crate::__rt::push_caret_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident @) => {
++ $crate::__rt::push_at(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident : $($rest:tt)*) => {
+- $crate::__rt::push_colon(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident !) => {
++ $crate::__rt::push_bang(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident :: $($rest:tt)*) => {
+- $crate::__rt::push_colon2(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ^) => {
++ $crate::__rt::push_caret(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident , $($rest:tt)*) => {
+- $crate::__rt::push_comma(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ^=) => {
++ $crate::__rt::push_caret_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident / $($rest:tt)*) => {
+- $crate::__rt::push_div(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident :) => {
++ $crate::__rt::push_colon(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident /= $($rest:tt)*) => {
+- $crate::__rt::push_div_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ::) => {
++ $crate::__rt::push_colon2(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident . $($rest:tt)*) => {
+- $crate::__rt::push_dot(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ,) => {
++ $crate::__rt::push_comma(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident .. $($rest:tt)*) => {
+- $crate::__rt::push_dot2(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident /) => {
++ $crate::__rt::push_div(&mut $tokens, $span);
++ };
++
++ ($tokens:ident $span:ident /=) => {
++ $crate::__rt::push_div_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident ... $($rest:tt)*) => {
+- $crate::__rt::push_dot3(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident .) => {
++ $crate::__rt::push_dot(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident ..= $($rest:tt)*) => {
+- $crate::__rt::push_dot_dot_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ..) => {
++ $crate::__rt::push_dot2(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident = $($rest:tt)*) => {
+- $crate::__rt::push_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ...) => {
++ $crate::__rt::push_dot3(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident == $($rest:tt)*) => {
+- $crate::__rt::push_eq_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ..=) => {
++ $crate::__rt::push_dot_dot_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident >= $($rest:tt)*) => {
+- $crate::__rt::push_ge(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident =) => {
++ $crate::__rt::push_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident > $($rest:tt)*) => {
+- $crate::__rt::push_gt(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ==) => {
++ $crate::__rt::push_eq_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident <= $($rest:tt)*) => {
+- $crate::__rt::push_le(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident >=) => {
++ $crate::__rt::push_ge(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident < $($rest:tt)*) => {
+- $crate::__rt::push_lt(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident >) => {
++ $crate::__rt::push_gt(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident *= $($rest:tt)*) => {
+- $crate::__rt::push_mul_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident <=) => {
++ $crate::__rt::push_le(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident != $($rest:tt)*) => {
+- $crate::__rt::push_ne(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident <) => {
++ $crate::__rt::push_lt(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident | $($rest:tt)*) => {
+- $crate::__rt::push_or(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident *=) => {
++ $crate::__rt::push_mul_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident |= $($rest:tt)*) => {
+- $crate::__rt::push_or_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident !=) => {
++ $crate::__rt::push_ne(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident || $($rest:tt)*) => {
+- $crate::__rt::push_or_or(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident |) => {
++ $crate::__rt::push_or(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident # $($rest:tt)*) => {
+- $crate::__rt::push_pound(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident |=) => {
++ $crate::__rt::push_or_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident ? $($rest:tt)*) => {
+- $crate::__rt::push_question(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ||) => {
++ $crate::__rt::push_or_or(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident -> $($rest:tt)*) => {
+- $crate::__rt::push_rarrow(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident #) => {
++ $crate::__rt::push_pound(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident <- $($rest:tt)*) => {
+- $crate::__rt::push_larrow(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ?) => {
++ $crate::__rt::push_question(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident % $($rest:tt)*) => {
+- $crate::__rt::push_rem(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ->) => {
++ $crate::__rt::push_rarrow(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident %= $($rest:tt)*) => {
+- $crate::__rt::push_rem_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident <-) => {
++ $crate::__rt::push_larrow(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident => $($rest:tt)*) => {
+- $crate::__rt::push_fat_arrow(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident %) => {
++ $crate::__rt::push_rem(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident ; $($rest:tt)*) => {
+- $crate::__rt::push_semi(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident %=) => {
++ $crate::__rt::push_rem_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident << $($rest:tt)*) => {
+- $crate::__rt::push_shl(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident =>) => {
++ $crate::__rt::push_fat_arrow(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident <<= $($rest:tt)*) => {
+- $crate::__rt::push_shl_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident ;) => {
++ $crate::__rt::push_semi(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident >> $($rest:tt)*) => {
+- $crate::__rt::push_shr(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident <<) => {
++ $crate::__rt::push_shl(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident >>= $($rest:tt)*) => {
+- $crate::__rt::push_shr_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident <<=) => {
++ $crate::__rt::push_shl_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident * $($rest:tt)*) => {
+- $crate::__rt::push_star(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident >>) => {
++ $crate::__rt::push_shr(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident - $($rest:tt)*) => {
+- $crate::__rt::push_sub(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident >>=) => {
++ $crate::__rt::push_shr_eq(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident -= $($rest:tt)*) => {
+- $crate::__rt::push_sub_eq(&mut $tokens, $span);
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident *) => {
++ $crate::__rt::push_star(&mut $tokens, $span);
+ };
+
+- ($tokens:ident $span:ident $first:tt $($rest:tt)*) => {
+- $crate::__rt::parse(&mut $tokens, $span, quote_stringify!($first));
+- quote_each_token!($tokens $span $($rest)*);
++ ($tokens:ident $span:ident -) => {
++ $crate::__rt::push_sub(&mut $tokens, $span);
++ };
++
++ ($tokens:ident $span:ident -=) => {
++ $crate::__rt::push_sub_eq(&mut $tokens, $span);
++ };
++
++ ($tokens:ident $span:ident $other:tt) => {
++ $crate::__rt::parse(&mut $tokens, $span, stringify!($other));
+ };
+ }
+-
+-// Unhygienically invoke whatever `stringify` the caller has in scope i.e. not a
+-// local macro. The macros marked `local_inner_macros` above cannot invoke
+-// `stringify` directly.
+-#[macro_export]
+-#[doc(hidden)]
+-macro_rules! quote_stringify {
+- ($tt:tt) => {
+- stringify!($tt)
+- };
+-}
+diff --git a/third_party/rust/quote/src/runtime.rs b/third_party/rust/quote/src/runtime.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/src/runtime.rs
+@@ -0,0 +1,373 @@
++use crate::{IdentFragment, ToTokens, TokenStreamExt};
++use std::fmt;
++use std::ops::BitOr;
++
++pub use proc_macro2::*;
++
++pub struct HasIterator; // True
++pub struct ThereIsNoIteratorInRepetition; // False
++
++impl BitOr<ThereIsNoIteratorInRepetition> for ThereIsNoIteratorInRepetition {
++ type Output = ThereIsNoIteratorInRepetition;
++ fn bitor(self, _rhs: ThereIsNoIteratorInRepetition) -> ThereIsNoIteratorInRepetition {
++ ThereIsNoIteratorInRepetition
++ }
++}
++
++impl BitOr<ThereIsNoIteratorInRepetition> for HasIterator {
++ type Output = HasIterator;
++ fn bitor(self, _rhs: ThereIsNoIteratorInRepetition) -> HasIterator {
++ HasIterator
++ }
++}
++
++impl BitOr<HasIterator> for ThereIsNoIteratorInRepetition {
++ type Output = HasIterator;
++ fn bitor(self, _rhs: HasIterator) -> HasIterator {
++ HasIterator
++ }
++}
++
++impl BitOr<HasIterator> for HasIterator {
++ type Output = HasIterator;
++ fn bitor(self, _rhs: HasIterator) -> HasIterator {
++ HasIterator
++ }
++}
++
++/// Extension traits used by the implementation of `quote!`. These are defined
++/// in separate traits, rather than as a single trait due to ambiguity issues.
++///
++/// These traits expose a `quote_into_iter` method which should allow calling
++/// whichever impl happens to be applicable. Calling that method repeatedly on
++/// the returned value should be idempotent.
++pub mod ext {
++ use super::RepInterp;
++ use super::{HasIterator as HasIter, ThereIsNoIteratorInRepetition as DoesNotHaveIter};
++ use crate::ToTokens;
++ use std::collections::btree_set::{self, BTreeSet};
++ use std::slice;
++
++ /// Extension trait providing the `quote_into_iter` method on iterators.
++ pub trait RepIteratorExt: Iterator + Sized {
++ fn quote_into_iter(self) -> (Self, HasIter) {
++ (self, HasIter)
++ }
++ }
++
++ impl<T: Iterator> RepIteratorExt for T {}
++
++ /// Extension trait providing the `quote_into_iter` method for
++ /// non-iterable types. These types interpolate the same value in each
++ /// iteration of the repetition.
++ pub trait RepToTokensExt {
++ /// Pretend to be an iterator for the purposes of `quote_into_iter`.
++ /// This allows repeated calls to `quote_into_iter` to continue
++ /// correctly returning DoesNotHaveIter.
++ fn next(&self) -> Option<&Self> {
++ Some(self)
++ }
++
++ fn quote_into_iter(&self) -> (&Self, DoesNotHaveIter) {
++ (self, DoesNotHaveIter)
++ }
++ }
++
++ impl<T: ToTokens + ?Sized> RepToTokensExt for T {}
++
++ /// Extension trait providing the `quote_into_iter` method for types that
++ /// can be referenced as an iterator.
++ pub trait RepAsIteratorExt<'q> {
++ type Iter: Iterator;
++
++ fn quote_into_iter(&'q self) -> (Self::Iter, HasIter);
++ }
++
++ impl<'q, 'a, T: RepAsIteratorExt<'q> + ?Sized> RepAsIteratorExt<'q> for &'a T {
++ type Iter = T::Iter;
++
++ fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) {
++ <T as RepAsIteratorExt>::quote_into_iter(*self)
++ }
++ }
++
++ impl<'q, 'a, T: RepAsIteratorExt<'q> + ?Sized> RepAsIteratorExt<'q> for &'a mut T {
++ type Iter = T::Iter;
++
++ fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) {
++ <T as RepAsIteratorExt>::quote_into_iter(*self)
++ }
++ }
++
++ impl<'q, T: 'q> RepAsIteratorExt<'q> for [T] {
++ type Iter = slice::Iter<'q, T>;
++
++ fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) {
++ (self.iter(), HasIter)
++ }
++ }
++
++ impl<'q, T: 'q> RepAsIteratorExt<'q> for Vec<T> {
++ type Iter = slice::Iter<'q, T>;
++
++ fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) {
++ (self.iter(), HasIter)
++ }
++ }
++
++ impl<'q, T: 'q> RepAsIteratorExt<'q> for BTreeSet<T> {
++ type Iter = btree_set::Iter<'q, T>;
++
++ fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) {
++ (self.iter(), HasIter)
++ }
++ }
++
++ macro_rules! array_rep_slice {
++ ($($l:tt)*) => {
++ $(
++ impl<'q, T: 'q> RepAsIteratorExt<'q> for [T; $l] {
++ type Iter = slice::Iter<'q, T>;
++
++ fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) {
++ (self.iter(), HasIter)
++ }
++ }
++ )*
++ }
++ }
++
++ array_rep_slice!(
++ 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16
++ 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32
++ );
++
++ impl<'q, T: RepAsIteratorExt<'q>> RepAsIteratorExt<'q> for RepInterp<T> {
++ type Iter = T::Iter;
++
++ fn quote_into_iter(&'q self) -> (Self::Iter, HasIter) {
++ self.0.quote_into_iter()
++ }
++ }
++}
++
++// Helper type used within interpolations to allow for repeated binding names.
++// Implements the relevant traits, and exports a dummy `next()` method.
++#[derive(Copy, Clone)]
++pub struct RepInterp<T>(pub T);
++
++impl<T> RepInterp<T> {
++ // This method is intended to look like `Iterator::next`, and is called when
++ // a name is bound multiple times, as the previous binding will shadow the
++ // original `Iterator` object. This allows us to avoid advancing the
++ // iterator multiple times per iteration.
++ pub fn next(self) -> Option<T> {
++ Some(self.0)
++ }
++}
++
++impl<T: Iterator> Iterator for RepInterp<T> {
++ type Item = T::Item;
++
++ fn next(&mut self) -> Option<Self::Item> {
++ self.0.next()
++ }
++}
++
++impl<T: ToTokens> ToTokens for RepInterp<T> {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ self.0.to_tokens(tokens);
++ }
++}
++
++fn is_ident_start(c: u8) -> bool {
++ (b'a' <= c && c <= b'z') || (b'A' <= c && c <= b'Z') || c == b'_'
++}
++
++fn is_ident_continue(c: u8) -> bool {
++ (b'a' <= c && c <= b'z') || (b'A' <= c && c <= b'Z') || c == b'_' || (b'0' <= c && c <= b'9')
++}
++
++fn is_ident(token: &str) -> bool {
++ let mut iter = token.bytes();
++ let first_ok = iter.next().map(is_ident_start).unwrap_or(false);
++
++ first_ok && iter.all(is_ident_continue)
++}
++
++pub fn parse(tokens: &mut TokenStream, span: Span, s: &str) {
++ if is_ident(s) {
++ // Fast path, since idents are the most common token.
++ tokens.append(Ident::new(s, span));
++ } else {
++ let s: TokenStream = s.parse().expect("invalid token stream");
++ tokens.extend(s.into_iter().map(|mut t| {
++ t.set_span(span);
++ t
++ }));
++ }
++}
++
++macro_rules! push_punct {
++ ($name:ident $char1:tt) => {
++ pub fn $name(tokens: &mut TokenStream, span: Span) {
++ let mut punct = Punct::new($char1, Spacing::Alone);
++ punct.set_span(span);
++ tokens.append(punct);
++ }
++ };
++ ($name:ident $char1:tt $char2:tt) => {
++ pub fn $name(tokens: &mut TokenStream, span: Span) {
++ let mut punct = Punct::new($char1, Spacing::Joint);
++ punct.set_span(span);
++ tokens.append(punct);
++ let mut punct = Punct::new($char2, Spacing::Alone);
++ punct.set_span(span);
++ tokens.append(punct);
++ }
++ };
++ ($name:ident $char1:tt $char2:tt $char3:tt) => {
++ pub fn $name(tokens: &mut TokenStream, span: Span) {
++ let mut punct = Punct::new($char1, Spacing::Joint);
++ punct.set_span(span);
++ tokens.append(punct);
++ let mut punct = Punct::new($char2, Spacing::Joint);
++ punct.set_span(span);
++ tokens.append(punct);
++ let mut punct = Punct::new($char3, Spacing::Alone);
++ punct.set_span(span);
++ tokens.append(punct);
++ }
++ };
++}
++
++push_punct!(push_add '+');
++push_punct!(push_add_eq '+' '=');
++push_punct!(push_and '&');
++push_punct!(push_and_and '&' '&');
++push_punct!(push_and_eq '&' '=');
++push_punct!(push_at '@');
++push_punct!(push_bang '!');
++push_punct!(push_caret '^');
++push_punct!(push_caret_eq '^' '=');
++push_punct!(push_colon ':');
++push_punct!(push_colon2 ':' ':');
++push_punct!(push_comma ',');
++push_punct!(push_div '/');
++push_punct!(push_div_eq '/' '=');
++push_punct!(push_dot '.');
++push_punct!(push_dot2 '.' '.');
++push_punct!(push_dot3 '.' '.' '.');
++push_punct!(push_dot_dot_eq '.' '.' '=');
++push_punct!(push_eq '=');
++push_punct!(push_eq_eq '=' '=');
++push_punct!(push_ge '>' '=');
++push_punct!(push_gt '>');
++push_punct!(push_le '<' '=');
++push_punct!(push_lt '<');
++push_punct!(push_mul_eq '*' '=');
++push_punct!(push_ne '!' '=');
++push_punct!(push_or '|');
++push_punct!(push_or_eq '|' '=');
++push_punct!(push_or_or '|' '|');
++push_punct!(push_pound '#');
++push_punct!(push_question '?');
++push_punct!(push_rarrow '-' '>');
++push_punct!(push_larrow '<' '-');
++push_punct!(push_rem '%');
++push_punct!(push_rem_eq '%' '=');
++push_punct!(push_fat_arrow '=' '>');
++push_punct!(push_semi ';');
++push_punct!(push_shl '<' '<');
++push_punct!(push_shl_eq '<' '<' '=');
++push_punct!(push_shr '>' '>');
++push_punct!(push_shr_eq '>' '>' '=');
++push_punct!(push_star '*');
++push_punct!(push_sub '-');
++push_punct!(push_sub_eq '-' '=');
++
++// Helper method for constructing identifiers from the `format_ident!` macro,
++// handling `r#` prefixes.
++//
++// Directly parsing the input string may produce a valid identifier,
++// although the input string was invalid, due to ignored characters such as
++// whitespace and comments. Instead, we always create a non-raw identifier
++// to validate that the string is OK, and only parse again if needed.
++//
++// The `is_ident` method defined above is insufficient for validation, as it
++// will reject non-ASCII identifiers.
++pub fn mk_ident(id: &str, span: Option<Span>) -> Ident {
++ let span = span.unwrap_or_else(Span::call_site);
++
++ let is_raw = id.starts_with("r#");
++ let unraw = Ident::new(if is_raw { &id[2..] } else { id }, span);
++ if !is_raw {
++ return unraw;
++ }
++
++ // At this point, the identifier is raw, and the unraw-ed version of it was
++ // successfully converted into an identifier. Try to produce a valid raw
++ // identifier by running the `TokenStream` parser, and unwrapping the first
++ // token as an `Ident`.
++ //
++ // FIXME: When `Ident::new_raw` becomes stable, this method should be
++ // updated to call it when available.
++ match id.parse::<TokenStream>() {
++ Ok(ts) => {
++ let mut iter = ts.into_iter();
++ match (iter.next(), iter.next()) {
++ (Some(TokenTree::Ident(mut id)), None) => {
++ id.set_span(span);
++ id
++ }
++ _ => unreachable!("valid raw ident fails to parse"),
++ }
++ }
++ Err(_) => unreachable!("valid raw ident fails to parse"),
++ }
++}
++
++// Adapts from `IdentFragment` to `fmt::Display` for use by the `format_ident!`
++// macro, and exposes span information from these fragments.
++//
++// This struct also has forwarding implementations of the formatting traits
++// `Octal`, `LowerHex`, `UpperHex`, and `Binary` to allow for their use within
++// `format_ident!`.
++#[derive(Copy, Clone)]
++pub struct IdentFragmentAdapter<T: IdentFragment>(pub T);
++
++impl<T: IdentFragment> IdentFragmentAdapter<T> {
++ pub fn span(&self) -> Option<Span> {
++ self.0.span()
++ }
++}
++
++impl<T: IdentFragment> fmt::Display for IdentFragmentAdapter<T> {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ IdentFragment::fmt(&self.0, f)
++ }
++}
++
++impl<T: IdentFragment + fmt::Octal> fmt::Octal for IdentFragmentAdapter<T> {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ fmt::Octal::fmt(&self.0, f)
++ }
++}
++
++impl<T: IdentFragment + fmt::LowerHex> fmt::LowerHex for IdentFragmentAdapter<T> {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ fmt::LowerHex::fmt(&self.0, f)
++ }
++}
++
++impl<T: IdentFragment + fmt::UpperHex> fmt::UpperHex for IdentFragmentAdapter<T> {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ fmt::UpperHex::fmt(&self.0, f)
++ }
++}
++
++impl<T: IdentFragment + fmt::Binary> fmt::Binary for IdentFragmentAdapter<T> {
++ fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
++ fmt::Binary::fmt(&self.0, f)
++ }
++}
+diff --git a/third_party/rust/quote/src/spanned.rs b/third_party/rust/quote/src/spanned.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/src/spanned.rs
+@@ -0,0 +1,42 @@
++use crate::ToTokens;
++use proc_macro2::{Span, TokenStream};
++
++pub trait Spanned {
++ fn __span(&self) -> Span;
++}
++
++impl Spanned for Span {
++ fn __span(&self) -> Span {
++ *self
++ }
++}
++
++impl<T: ?Sized + ToTokens> Spanned for T {
++ fn __span(&self) -> Span {
++ join_spans(self.into_token_stream())
++ }
++}
++
++fn join_spans(tokens: TokenStream) -> Span {
++ let mut iter = tokens.into_iter().filter_map(|tt| {
++ // FIXME: This shouldn't be required, since optimally spans should
++ // never be invalid. This filter_map can probably be removed when
++ // https://github.com/rust-lang/rust/issues/43081 is resolved.
++ let span = tt.span();
++ let debug = format!("{:?}", span);
++ if debug.ends_with("bytes(0..0)") {
++ None
++ } else {
++ Some(span)
++ }
++ });
++
++ let first = match iter.next() {
++ Some(span) => span,
++ None => return Span::call_site(),
++ };
++
++ iter.fold(None, |_prev, next| Some(next))
++ .and_then(|last| first.join(last))
++ .unwrap_or(first)
++}
+diff --git a/third_party/rust/quote/src/to_tokens.rs b/third_party/rust/quote/src/to_tokens.rs
+--- a/third_party/rust/quote/src/to_tokens.rs
++++ b/third_party/rust/quote/src/to_tokens.rs
+@@ -2,10 +2,11 @@
+
+ use std::borrow::Cow;
+ use std::iter;
++use std::rc::Rc;
+
+ use proc_macro2::{Group, Ident, Literal, Punct, Span, TokenStream, TokenTree};
+
+-/// Types that can be interpolated inside a [`quote!`] invocation.
++/// Types that can be interpolated inside a `quote!` invocation.
+ ///
+ /// [`quote!`]: macro.quote.html
+ pub trait ToTokens {
+@@ -21,7 +22,7 @@
+ /// Example implementation for a struct representing Rust paths like
+ /// `std::cmp::PartialEq`:
+ ///
+- /// ```edition2018
++ /// ```
+ /// use proc_macro2::{TokenTree, Spacing, Span, Punct, TokenStream};
+ /// use quote::{TokenStreamExt, ToTokens};
+ ///
+@@ -57,13 +58,21 @@
+ ///
+ /// This method is implicitly implemented using `to_tokens`, and acts as a
+ /// convenience method for consumers of the `ToTokens` trait.
++ fn to_token_stream(&self) -> TokenStream {
++ let mut tokens = TokenStream::new();
++ self.to_tokens(&mut tokens);
++ tokens
++ }
++
++ /// Convert `self` directly into a `TokenStream` object.
++ ///
++ /// This method is implicitly implemented using `to_tokens`, and acts as a
++ /// convenience method for consumers of the `ToTokens` trait.
+ fn into_token_stream(self) -> TokenStream
+ where
+ Self: Sized,
+ {
+- let mut tokens = TokenStream::new();
+- self.to_tokens(&mut tokens);
+- tokens
++ self.to_token_stream()
+ }
+ }
+
+@@ -91,6 +100,12 @@
+ }
+ }
+
++impl<T: ?Sized + ToTokens> ToTokens for Rc<T> {
++ fn to_tokens(&self, tokens: &mut TokenStream) {
++ (**self).to_tokens(tokens);
++ }
++}
++
+ impl<T: ToTokens> ToTokens for Option<T> {
+ fn to_tokens(&self, tokens: &mut TokenStream) {
+ if let Some(ref t) = *self {
+@@ -126,24 +141,20 @@
+ i16 => i16_suffixed
+ i32 => i32_suffixed
+ i64 => i64_suffixed
++ i128 => i128_suffixed
+ isize => isize_suffixed
+
+ u8 => u8_suffixed
+ u16 => u16_suffixed
+ u32 => u32_suffixed
+ u64 => u64_suffixed
++ u128 => u128_suffixed
+ usize => usize_suffixed
+
+ f32 => f32_suffixed
+ f64 => f64_suffixed
+ }
+
+-#[cfg(integer128)]
+-primitive! {
+- i128 => i128_suffixed
+- u128 => u128_suffixed
+-}
+-
+ impl ToTokens for char {
+ fn to_tokens(&self, tokens: &mut TokenStream) {
+ tokens.append(Literal::character(*self));
+diff --git a/third_party/rust/quote/tests/compiletest.rs b/third_party/rust/quote/tests/compiletest.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/tests/compiletest.rs
+@@ -0,0 +1,6 @@
++#[rustversion::attr(not(nightly), ignore)]
++#[test]
++fn ui() {
++ let t = trybuild::TestCases::new();
++ t.compile_fail("tests/ui/*.rs");
++}
+diff --git a/third_party/rust/quote/tests/test.rs b/third_party/rust/quote/tests/test.rs
+--- a/third_party/rust/quote/tests/test.rs
++++ b/third_party/rust/quote/tests/test.rs
+@@ -1,18 +1,10 @@
+ #![cfg_attr(feature = "cargo-clippy", allow(blacklisted_name))]
+
+ use std::borrow::Cow;
+-
+-extern crate proc_macro2;
+-#[macro_use]
+-extern crate quote;
++use std::collections::BTreeSet;
+
+ use proc_macro2::{Ident, Span, TokenStream};
+-use quote::TokenStreamExt;
+-
+-mod conditional {
+- #[cfg(integer128)]
+- mod integer128;
+-}
++use quote::{format_ident, quote, TokenStreamExt};
+
+ struct X;
+
+@@ -125,18 +117,20 @@
+ let ii16 = -1i16;
+ let ii32 = -1i32;
+ let ii64 = -1i64;
++ let ii128 = -1i128;
+ let iisize = -1isize;
+ let uu8 = 1u8;
+ let uu16 = 1u16;
+ let uu32 = 1u32;
+ let uu64 = 1u64;
++ let uu128 = 1u128;
+ let uusize = 1usize;
+
+ let tokens = quote! {
+- #ii8 #ii16 #ii32 #ii64 #iisize
+- #uu8 #uu16 #uu32 #uu64 #uusize
++ #ii8 #ii16 #ii32 #ii64 #ii128 #iisize
++ #uu8 #uu16 #uu32 #uu64 #uu128 #uusize
+ };
+- let expected = "-1i8 -1i16 -1i32 -1i64 -1isize 1u8 1u16 1u32 1u64 1usize";
++ let expected = "-1i8 -1i16 -1i32 -1i64 -1i128 -1isize 1u8 1u16 1u32 1u64 1u128 1usize";
+ assert_eq!(expected, tokens.to_string());
+ }
+
+@@ -166,7 +160,7 @@
+ let tokens = quote! {
+ #zero #pound #quote #apost #newline #heart
+ };
+- let expected = "'\\u{0}' '#' '\\\"' '\\'' '\\n' '\\u{2764}'";
++ let expected = "'\\u{0}' '#' '\"' '\\'' '\\n' '\\u{2764}'";
+ assert_eq!(expected, tokens.to_string());
+ }
+
+@@ -174,7 +168,7 @@
+ fn test_str() {
+ let s = "\0 a 'b \" c";
+ let tokens = quote!(#s);
+- let expected = "\"\\u{0} a \\'b \\\" c\"";
++ let expected = "\"\\u{0} a 'b \\\" c\"";
+ assert_eq!(expected, tokens.to_string());
+ }
+
+@@ -182,7 +176,7 @@
+ fn test_string() {
+ let s = "\0 a 'b \" c".to_string();
+ let tokens = quote!(#s);
+- let expected = "\"\\u{0} a \\'b \\\" c\"";
++ let expected = "\"\\u{0} a 'b \\\" c\"";
+ assert_eq!(expected, tokens.to_string());
+ }
+
+@@ -233,9 +227,42 @@
+ }
+
+ #[test]
+-fn test_empty_repetition() {
+- let tokens = quote!(#(a b)* #(c d),*);
+- assert_eq!("", tokens.to_string());
++fn test_duplicate_name_repetition() {
++ let foo = &["a", "b"];
++
++ let tokens = quote! {
++ #(#foo: #foo),*
++ #(#foo: #foo),*
++ };
++
++ let expected = r#""a" : "a" , "b" : "b" "a" : "a" , "b" : "b""#;
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_duplicate_name_repetition_no_copy() {
++ let foo = vec!["a".to_owned(), "b".to_owned()];
++
++ let tokens = quote! {
++ #(#foo: #foo),*
++ };
++
++ let expected = r#""a" : "a" , "b" : "b""#;
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_btreeset_repetition() {
++ let mut set = BTreeSet::new();
++ set.insert("a".to_owned());
++ set.insert("b".to_owned());
++
++ let tokens = quote! {
++ #(#set: #set),*
++ };
++
++ let expected = r#""a" : "a" , "b" : "b""#;
++ assert_eq!(expected, tokens.to_string());
+ }
+
+ #[test]
+@@ -249,6 +276,19 @@
+ }
+
+ #[test]
++fn test_nonrep_in_repetition() {
++ let rep = vec!["a", "b"];
++ let nonrep = "c";
++
++ let tokens = quote! {
++ #(#rep #rep : #nonrep #nonrep),*
++ };
++
++ let expected = r#""a" "a" : "c" "c" , "b" "b" : "c" "c""#;
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
+ fn test_empty_quote() {
+ let tokens = quote!();
+ assert_eq!("", tokens.to_string());
+@@ -275,7 +315,7 @@
+ #[test]
+ fn test_closure() {
+ fn field_i(i: usize) -> Ident {
+- Ident::new(&format!("__field{}", i), Span::call_site())
++ format_ident!("__field{}", i)
+ }
+
+ let fields = (0usize..3)
+@@ -293,3 +333,97 @@
+ a.append_all(b);
+ assert_eq!("a b", a.to_string());
+ }
++
++#[test]
++fn test_format_ident() {
++ let id0 = format_ident!("Aa");
++ let id1 = format_ident!("Hello{x}", x = id0);
++ let id2 = format_ident!("Hello{x}", x = 5usize);
++ let id3 = format_ident!("Hello{}_{x}", id0, x = 10usize);
++ let id4 = format_ident!("Aa", span = Span::call_site());
++
++ assert_eq!(id0, "Aa");
++ assert_eq!(id1, "HelloAa");
++ assert_eq!(id2, "Hello5");
++ assert_eq!(id3, "HelloAa_10");
++ assert_eq!(id4, "Aa");
++}
++
++#[test]
++fn test_format_ident_strip_raw() {
++ let id = format_ident!("r#struct");
++ let my_id = format_ident!("MyId{}", id);
++ let raw_my_id = format_ident!("r#MyId{}", id);
++
++ assert_eq!(id, "r#struct");
++ assert_eq!(my_id, "MyIdstruct");
++ assert_eq!(raw_my_id, "r#MyIdstruct");
++}
++
++#[test]
++fn test_outer_line_comment() {
++ let tokens = quote! {
++ /// doc
++ };
++ let expected = "# [ doc = r\" doc\" ]";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_inner_line_comment() {
++ let tokens = quote! {
++ //! doc
++ };
++ let expected = "# ! [ doc = r\" doc\" ]";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_outer_block_comment() {
++ let tokens = quote! {
++ /** doc */
++ };
++ let expected = "# [ doc = r\" doc \" ]";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_inner_block_comment() {
++ let tokens = quote! {
++ /*! doc */
++ };
++ let expected = "# ! [ doc = r\" doc \" ]";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_outer_attr() {
++ let tokens = quote! {
++ #[inline]
++ };
++ let expected = "# [ inline ]";
++ assert_eq!(expected, tokens.to_string());
++}
++
++#[test]
++fn test_inner_attr() {
++ let tokens = quote! {
++ #![no_std]
++ };
++ let expected = "# ! [ no_std ]";
++ assert_eq!(expected, tokens.to_string());
++}
++
++// https://github.com/dtolnay/quote/issues/130
++#[test]
++fn test_star_after_repetition() {
++ let c = vec!['0', '1'];
++ let tokens = quote! {
++ #(
++ f(#c);
++ )*
++ *out = None;
++ };
++ let expected = "f ( '0' ) ; f ( '1' ) ; * out = None ;";
++ assert_eq!(expected, tokens.to_string());
++}
+diff --git a/third_party/rust/quote/tests/ui/does-not-have-iter-interpolated-dup.rs b/third_party/rust/quote/tests/ui/does-not-have-iter-interpolated-dup.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/tests/ui/does-not-have-iter-interpolated-dup.rs
+@@ -0,0 +1,9 @@
++use quote::quote;
++
++fn main() {
++ let nonrep = "";
++
++ // Without some protection against repetitions with no iterator somewhere
++ // inside, this would loop infinitely.
++ quote!(#(#nonrep #nonrep)*);
++}
+diff --git a/third_party/rust/quote/tests/ui/does-not-have-iter-interpolated.rs b/third_party/rust/quote/tests/ui/does-not-have-iter-interpolated.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/tests/ui/does-not-have-iter-interpolated.rs
+@@ -0,0 +1,9 @@
++use quote::quote;
++
++fn main() {
++ let nonrep = "";
++
++ // Without some protection against repetitions with no iterator somewhere
++ // inside, this would loop infinitely.
++ quote!(#(#nonrep)*);
++}
+diff --git a/third_party/rust/quote/tests/ui/does-not-have-iter-separated.rs b/third_party/rust/quote/tests/ui/does-not-have-iter-separated.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/tests/ui/does-not-have-iter-separated.rs
+@@ -0,0 +1,5 @@
++use quote::quote;
++
++fn main() {
++ quote!(#(a b),*);
++}
+diff --git a/third_party/rust/quote/tests/ui/does-not-have-iter.rs b/third_party/rust/quote/tests/ui/does-not-have-iter.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/tests/ui/does-not-have-iter.rs
+@@ -0,0 +1,5 @@
++use quote::quote;
++
++fn main() {
++ quote!(#(a b)*);
++}
+diff --git a/third_party/rust/quote/tests/ui/not-quotable.rs b/third_party/rust/quote/tests/ui/not-quotable.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/tests/ui/not-quotable.rs
+@@ -0,0 +1,7 @@
++use quote::quote;
++use std::net::Ipv4Addr;
++
++fn main() {
++ let ip = Ipv4Addr::LOCALHOST;
++ let _ = quote! { #ip };
++}
+diff --git a/third_party/rust/quote/tests/ui/not-repeatable.rs b/third_party/rust/quote/tests/ui/not-repeatable.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/tests/ui/not-repeatable.rs
+@@ -0,0 +1,7 @@
++use quote::quote;
++use std::net::Ipv4Addr;
++
++fn main() {
++ let ip = Ipv4Addr::LOCALHOST;
++ let _ = quote! { #(#ip)* };
++}
+diff --git a/third_party/rust/quote/tests/ui/wrong-type-span.rs b/third_party/rust/quote/tests/ui/wrong-type-span.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/quote/tests/ui/wrong-type-span.rs
+@@ -0,0 +1,7 @@
++use quote::quote_spanned;
++
++fn main() {
++ let span = "";
++ let x = 0;
++ quote_spanned!(span=> #x);
++}
+diff --git a/third_party/rust/rustc-hash/.cargo-checksum.json b/third_party/rust/rustc-hash/.cargo-checksum.json
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/rustc-hash/.cargo-checksum.json
+@@ -0,0 +1 @@
++{"files":{"CODE_OF_CONDUCT.md":"edca092fde496419a9f1ba640048aa0270b62dfea576cd3175f0b53e3c230470","Cargo.toml":"5bb0914fd92b42e6b7f032bfc4dd238979e5c7e505c7b5e5530c11ab441ad941","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"23f18e03dc49df91622fe2a76176497404e46ced8a715d9d2b67a7446571cca3","README.md":"f2e2a279de9cc138952f50954ea95b17f567ac21c3ae1dbcaa12a21f48dbbf31","src/lib.rs":"91e3e3cf488d5ddffaa935996575750837a1acc636d0d747f7127e5e71f458fd"},"package":"7540fc8b0c49f096ee9c961cda096467dce8084bec6bdca2fc83895fd9b28cb8"}
+\ No newline at end of file
+diff --git a/third_party/rust/rustc-hash/CODE_OF_CONDUCT.md b/third_party/rust/rustc-hash/CODE_OF_CONDUCT.md
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/rustc-hash/CODE_OF_CONDUCT.md
+@@ -0,0 +1,40 @@
++# The Rust Code of Conduct
++
++A version of this document [can be found online](https://www.rust-lang.org/conduct.html).
++
++## Conduct
++
++**Contact**: [rust-mods@rust-lang.org](mailto:rust-mods@rust-lang.org)
++
++* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic.
++* On IRC, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all.
++* Please be kind and courteous. There's no need to be mean or rude.
++* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer.
++* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works.
++* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behavior. We interpret the term "harassment" as including the definition in the <a href="http://citizencodeofconduct.org/">Citizen Code of Conduct</a>; if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don't tolerate behavior that excludes people in socially marginalized groups.
++* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel ops or any of the [Rust moderation team][mod_team] immediately. Whether you're a regular contributor or a newcomer, we care about making this community a safe place for you and we've got your back.
++* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behavior is not welcome.
++
++## Moderation
++
++
++These are the policies for upholding our community's standards of conduct. If you feel that a thread needs moderation, please contact the [Rust moderation team][mod_team].
++
++1. Remarks that violate the Rust standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.)
++2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed.
++3. Moderators will first respond to such remarks with a warning.
++4. If the warning is unheeded, the user will be "kicked," i.e., kicked out of the communication channel to cool off.
++5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded.
++6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology.
++7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, **in private**. Complaints about bans in-channel are not allowed.
++8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others.
++
++In the Rust community we strive to go the extra step to look out for each other. Don't just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they're off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely.
++
++And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could've communicated better — remember that it's your responsibility to make your fellow Rustaceans comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust.
++
++The enforcement policies listed above apply to all official Rust venues; including official IRC channels (#rust, #rust-internals, #rust-tools, #rust-libs, #rustc, #rust-beginners, #rust-docs, #rust-community, #rust-lang, and #cargo); GitHub repositories under rust-lang, rust-lang-nursery, and rust-lang-deprecated; and all forums under rust-lang.org (users.rust-lang.org, internals.rust-lang.org). For other projects adopting the Rust Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion.
++
++*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling) as well as the [Contributor Covenant v1.3.0](https://www.contributor-covenant.org/version/1/3/0/).*
++
++[mod_team]: https://www.rust-lang.org/team.html#Moderation-team
+diff --git a/third_party/rust/rustc-hash/Cargo.toml b/third_party/rust/rustc-hash/Cargo.toml
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/rustc-hash/Cargo.toml
+@@ -0,0 +1,23 @@
++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
++#
++# When uploading crates to the registry Cargo will automatically
++# "normalize" Cargo.toml files for maximal compatibility
++# with all versions of Cargo and also rewrite `path` dependencies
++# to registry (e.g. crates.io) dependencies
++#
++# If you believe there's an error in this file please file an
++# issue against the rust-lang/cargo repository. If you're
++# editing this file be aware that the upstream Cargo.toml
++# will likely look very different (and much more reasonable)
++
++[package]
++name = "rustc-hash"
++version = "1.0.1"
++authors = ["The Rust Project Developers"]
++description = "speed, non-cryptographic hash used in rustc"
++readme = "README.md"
++keywords = ["hash", "fxhash", "rustc"]
++license = "Apache-2.0/MIT"
++repository = "https://github.com/rust-lang-nursery/rustc-hash"
++[dependencies.byteorder]
++version = "1.1"
+diff --git a/third_party/rust/rustc-hash/LICENSE-APACHE b/third_party/rust/rustc-hash/LICENSE-APACHE
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/rustc-hash/LICENSE-APACHE
+@@ -0,0 +1,201 @@
++ Apache License
++ Version 2.0, January 2004
++ http://www.apache.org/licenses/
++
++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
++
++1. Definitions.
++
++ "License" shall mean the terms and conditions for use, reproduction,
++ and distribution as defined by Sections 1 through 9 of this document.
++
++ "Licensor" shall mean the copyright owner or entity authorized by
++ the copyright owner that is granting the License.
++
++ "Legal Entity" shall mean the union of the acting entity and all
++ other entities that control, are controlled by, or are under common
++ control with that entity. For the purposes of this definition,
++ "control" means (i) the power, direct or indirect, to cause the
++ direction or management of such entity, whether by contract or
++ otherwise, or (ii) ownership of fifty percent (50%) or more of the
++ outstanding shares, or (iii) beneficial ownership of such entity.
++
++ "You" (or "Your") shall mean an individual or Legal Entity
++ exercising permissions granted by this License.
++
++ "Source" form shall mean the preferred form for making modifications,
++ including but not limited to software source code, documentation
++ source, and configuration files.
++
++ "Object" form shall mean any form resulting from mechanical
++ transformation or translation of a Source form, including but
++ not limited to compiled object code, generated documentation,
++ and conversions to other media types.
++
++ "Work" shall mean the work of authorship, whether in Source or
++ Object form, made available under the License, as indicated by a
++ copyright notice that is included in or attached to the work
++ (an example is provided in the Appendix below).
++
++ "Derivative Works" shall mean any work, whether in Source or Object
++ form, that is based on (or derived from) the Work and for which the
++ editorial revisions, annotations, elaborations, or other modifications
++ represent, as a whole, an original work of authorship. For the purposes
++ of this License, Derivative Works shall not include works that remain
++ separable from, or merely link (or bind by name) to the interfaces of,
++ the Work and Derivative Works thereof.
++
++ "Contribution" shall mean any work of authorship, including
++ the original version of the Work and any modifications or additions
++ to that Work or Derivative Works thereof, that is intentionally
++ submitted to Licensor for inclusion in the Work by the copyright owner
++ or by an individual or Legal Entity authorized to submit on behalf of
++ the copyright owner. For the purposes of this definition, "submitted"
++ means any form of electronic, verbal, or written communication sent
++ to the Licensor or its representatives, including but not limited to
++ communication on electronic mailing lists, source code control systems,
++ and issue tracking systems that are managed by, or on behalf of, the
++ Licensor for the purpose of discussing and improving the Work, but
++ excluding communication that is conspicuously marked or otherwise
++ designated in writing by the copyright owner as "Not a Contribution."
++
++ "Contributor" shall mean Licensor and any individual or Legal Entity
++ on behalf of whom a Contribution has been received by Licensor and
++ subsequently incorporated within the Work.
++
++2. Grant of Copyright License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ copyright license to reproduce, prepare Derivative Works of,
++ publicly display, publicly perform, sublicense, and distribute the
++ Work and such Derivative Works in Source or Object form.
++
++3. Grant of Patent License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ (except as stated in this section) patent license to make, have made,
++ use, offer to sell, sell, import, and otherwise transfer the Work,
++ where such license applies only to those patent claims licensable
++ by such Contributor that are necessarily infringed by their
++ Contribution(s) alone or by combination of their Contribution(s)
++ with the Work to which such Contribution(s) was submitted. If You
++ institute patent litigation against any entity (including a
++ cross-claim or counterclaim in a lawsuit) alleging that the Work
++ or a Contribution incorporated within the Work constitutes direct
++ or contributory patent infringement, then any patent licenses
++ granted to You under this License for that Work shall terminate
++ as of the date such litigation is filed.
++
++4. Redistribution. You may reproduce and distribute copies of the
++ Work or Derivative Works thereof in any medium, with or without
++ modifications, and in Source or Object form, provided that You
++ meet the following conditions:
++
++ (a) You must give any other recipients of the Work or
++ Derivative Works a copy of this License; and
++
++ (b) You must cause any modified files to carry prominent notices
++ stating that You changed the files; and
++
++ (c) You must retain, in the Source form of any Derivative Works
++ that You distribute, all copyright, patent, trademark, and
++ attribution notices from the Source form of the Work,
++ excluding those notices that do not pertain to any part of
++ the Derivative Works; and
++
++ (d) If the Work includes a "NOTICE" text file as part of its
++ distribution, then any Derivative Works that You distribute must
++ include a readable copy of the attribution notices contained
++ within such NOTICE file, excluding those notices that do not
++ pertain to any part of the Derivative Works, in at least one
++ of the following places: within a NOTICE text file distributed
++ as part of the Derivative Works; within the Source form or
++ documentation, if provided along with the Derivative Works; or,
++ within a display generated by the Derivative Works, if and
++ wherever such third-party notices normally appear. The contents
++ of the NOTICE file are for informational purposes only and
++ do not modify the License. You may add Your own attribution
++ notices within Derivative Works that You distribute, alongside
++ or as an addendum to the NOTICE text from the Work, provided
++ that such additional attribution notices cannot be construed
++ as modifying the License.
++
++ You may add Your own copyright statement to Your modifications and
++ may provide additional or different license terms and conditions
++ for use, reproduction, or distribution of Your modifications, or
++ for any such Derivative Works as a whole, provided Your use,
++ reproduction, and distribution of the Work otherwise complies with
++ the conditions stated in this License.
++
++5. Submission of Contributions. Unless You explicitly state otherwise,
++ any Contribution intentionally submitted for inclusion in the Work
++ by You to the Licensor shall be under the terms and conditions of
++ this License, without any additional terms or conditions.
++ Notwithstanding the above, nothing herein shall supersede or modify
++ the terms of any separate license agreement you may have executed
++ with Licensor regarding such Contributions.
++
++6. Trademarks. This License does not grant permission to use the trade
++ names, trademarks, service marks, or product names of the Licensor,
++ except as required for reasonable and customary use in describing the
++ origin of the Work and reproducing the content of the NOTICE file.
++
++7. Disclaimer of Warranty. Unless required by applicable law or
++ agreed to in writing, Licensor provides the Work (and each
++ Contributor provides its Contributions) on an "AS IS" BASIS,
++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
++ implied, including, without limitation, any warranties or conditions
++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
++ PARTICULAR PURPOSE. You are solely responsible for determining the
++ appropriateness of using or redistributing the Work and assume any
++ risks associated with Your exercise of permissions under this License.
++
++8. Limitation of Liability. In no event and under no legal theory,
++ whether in tort (including negligence), contract, or otherwise,
++ unless required by applicable law (such as deliberate and grossly
++ negligent acts) or agreed to in writing, shall any Contributor be
++ liable to You for damages, including any direct, indirect, special,
++ incidental, or consequential damages of any character arising as a
++ result of this License or out of the use or inability to use the
++ Work (including but not limited to damages for loss of goodwill,
++ work stoppage, computer failure or malfunction, or any and all
++ other commercial damages or losses), even if such Contributor
++ has been advised of the possibility of such damages.
++
++9. Accepting Warranty or Additional Liability. While redistributing
++ the Work or Derivative Works thereof, You may choose to offer,
++ and charge a fee for, acceptance of support, warranty, indemnity,
++ or other liability obligations and/or rights consistent with this
++ License. However, in accepting such obligations, You may act only
++ on Your own behalf and on Your sole responsibility, not on behalf
++ of any other Contributor, and only if You agree to indemnify,
++ defend, and hold each Contributor harmless for any liability
++ incurred by, or claims asserted against, such Contributor by reason
++ of your accepting any such warranty or additional liability.
++
++END OF TERMS AND CONDITIONS
++
++APPENDIX: How to apply the Apache License to your work.
++
++ To apply the Apache License to your work, attach the following
++ boilerplate notice, with the fields enclosed by brackets "[]"
++ replaced with your own identifying information. (Don't include
++ the brackets!) The text should be enclosed in the appropriate
++ comment syntax for the file format. We also recommend that a
++ file or class name and description of purpose be included on the
++ same "printed page" as the copyright notice for easier
++ identification within third-party archives.
++
++Copyright [yyyy] [name of copyright owner]
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
+diff --git a/third_party/rust/rustc-hash/LICENSE-MIT b/third_party/rust/rustc-hash/LICENSE-MIT
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/rustc-hash/LICENSE-MIT
+@@ -0,0 +1,23 @@
++Permission is hereby granted, free of charge, to any
++person obtaining a copy of this software and associated
++documentation files (the "Software"), to deal in the
++Software without restriction, including without
++limitation the rights to use, copy, modify, merge,
++publish, distribute, sublicense, and/or sell copies of
++the Software, and to permit persons to whom the Software
++is furnished to do so, subject to the following
++conditions:
++
++The above copyright notice and this permission notice
++shall be included in all copies or substantial portions
++of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
+diff --git a/third_party/rust/rustc-hash/README.md b/third_party/rust/rustc-hash/README.md
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/rustc-hash/README.md
+@@ -0,0 +1,21 @@
++# rustc-hash
++
++A speedy hash algorithm used within rustc. The hashmap in liballoc by
++default uses SipHash which isn't quite as speedy as we want. In the
++compiler we're not really worried about DOS attempts, so we use a fast
++non-cryptographic hash.
++
++This is the same as the algorithm used by Firefox -- which is a
++homespun one not based on any widely-known algorithm -- though
++modified to produce 64-bit hash values instead of 32-bit hash
++values. It consistently out-performs an FNV-based hash within rustc
++itself -- the collision rate is similar or slightly worse than FNV,
++but the speed of the hash function itself is much higher because it
++works on up to 8 bytes at a time.
++
++## Usage
++
++```
++use rustc_hash::FxHashMap;
++let map: FxHashMap<u32, u32> = FxHashMap::default();
++```
+diff --git a/third_party/rust/rustc-hash/src/lib.rs b/third_party/rust/rustc-hash/src/lib.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/rustc-hash/src/lib.rs
+@@ -0,0 +1,136 @@
++// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution and at
++// http://rust-lang.org/COPYRIGHT.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++//! Fast, non-cryptographic hash used by rustc and Firefox.
++//!
++//! # Example
++//!
++//! ```rust
++//! use rustc_hash::FxHashMap;
++//! let mut map: FxHashMap<u32, u32> = FxHashMap::default();
++//! map.insert(22, 44);
++//! ```
++
++extern crate byteorder;
++
++use std::collections::{HashMap, HashSet};
++use std::default::Default;
++use std::hash::{Hasher, BuildHasherDefault};
++use std::ops::BitXor;
++use std::mem::size_of;
++
++use byteorder::{ByteOrder, NativeEndian};
++
++/// Type alias for a hashmap using the `fx` hash algorithm.
++pub type FxHashMap<K, V> = HashMap<K, V, BuildHasherDefault<FxHasher>>;
++
++/// Type alias for a hashmap using the `fx` hash algorithm.
++pub type FxHashSet<V> = HashSet<V, BuildHasherDefault<FxHasher>>;
++
++/// A speedy hash algorithm for use within rustc. The hashmap in liballoc
++/// by default uses SipHash which isn't quite as speedy as we want. In the
++/// compiler we're not really worried about DOS attempts, so we use a fast
++/// non-cryptographic hash.
++///
++/// This is the same as the algorithm used by Firefox -- which is a homespun
++/// one not based on any widely-known algorithm -- though modified to produce
++/// 64-bit hash values instead of 32-bit hash values. It consistently
++/// out-performs an FNV-based hash within rustc itself -- the collision rate is
++/// similar or slightly worse than FNV, but the speed of the hash function
++/// itself is much higher because it works on up to 8 bytes at a time.
++pub struct FxHasher {
++ hash: usize
++}
++
++#[cfg(target_pointer_width = "32")]
++const K: usize = 0x9e3779b9;
++#[cfg(target_pointer_width = "64")]
++const K: usize = 0x517cc1b727220a95;
++
++impl Default for FxHasher {
++ #[inline]
++ fn default() -> FxHasher {
++ FxHasher { hash: 0 }
++ }
++}
++
++impl FxHasher {
++ #[inline]
++ fn add_to_hash(&mut self, i: usize) {
++ self.hash = self.hash.rotate_left(5).bitxor(i).wrapping_mul(K);
++ }
++}
++
++impl Hasher for FxHasher {
++ #[inline]
++ fn write(&mut self, mut bytes: &[u8]) {
++ #[cfg(target_pointer_width = "32")]
++ let read_usize = |bytes| NativeEndian::read_u32(bytes);
++ #[cfg(target_pointer_width = "64")]
++ let read_usize = |bytes| NativeEndian::read_u64(bytes);
++
++ let mut hash = FxHasher { hash: self.hash };
++ assert!(size_of::<usize>() <= 8);
++ while bytes.len() >= size_of::<usize>() {
++ hash.add_to_hash(read_usize(bytes) as usize);
++ bytes = &bytes[size_of::<usize>()..];
++ }
++ if (size_of::<usize>() > 4) && (bytes.len() >= 4) {
++ hash.add_to_hash(NativeEndian::read_u32(bytes) as usize);
++ bytes = &bytes[4..];
++ }
++ if (size_of::<usize>() > 2) && bytes.len() >= 2 {
++ hash.add_to_hash(NativeEndian::read_u16(bytes) as usize);
++ bytes = &bytes[2..];
++ }
++ if (size_of::<usize>() > 1) && bytes.len() >= 1 {
++ hash.add_to_hash(bytes[0] as usize);
++ }
++ self.hash = hash.hash;
++ }
++
++ #[inline]
++ fn write_u8(&mut self, i: u8) {
++ self.add_to_hash(i as usize);
++ }
++
++ #[inline]
++ fn write_u16(&mut self, i: u16) {
++ self.add_to_hash(i as usize);
++ }
++
++ #[inline]
++ fn write_u32(&mut self, i: u32) {
++ self.add_to_hash(i as usize);
++ }
++
++ #[cfg(target_pointer_width = "32")]
++ #[inline]
++ fn write_u64(&mut self, i: u64) {
++ self.add_to_hash(i as usize);
++ self.add_to_hash((i >> 32) as usize);
++ }
++
++ #[cfg(target_pointer_width = "64")]
++ #[inline]
++ fn write_u64(&mut self, i: u64) {
++ self.add_to_hash(i as usize);
++ }
++
++ #[inline]
++ fn write_usize(&mut self, i: usize) {
++ self.add_to_hash(i);
++ }
++
++ #[inline]
++ fn finish(&self) -> u64 {
++ self.hash as u64
++ }
++}
+diff --git a/third_party/rust/unicode-xid-0.1.0/.cargo-checksum.json b/third_party/rust/unicode-xid-0.1.0/.cargo-checksum.json
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/unicode-xid-0.1.0/.cargo-checksum.json
+@@ -0,0 +1 @@
++{"files":{"COPYRIGHT":"23860c2a7b5d96b21569afedf033469bab9fe14a1b24a35068b8641c578ce24d","Cargo.toml":"aafcae4002bee71546a6aa40a97b9124a69f169ee7e3a9e3262338e32b4c2b9b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"67998486b32f4fe46abbbaa411b92528750e7f0e22452dc8a5b95d87d80fde75","scripts/unicode.py":"762eea92dd51238c6bf877570bde1149932ba15cf87be1618fc21cd53e941733","src/lib.rs":"4a89fadf452ae7c53536eaa4496f951a3153f8189dd1cbc532648731d30f0b11","src/tables.rs":"0643459b6ebeeed83aecd7604f0ea29c06bea7ce6c1cd9acd4988d27ace1ec53","src/tests.rs":"35a459382e190197e7b9a78832ae79f310b48a02a5b4227bf9bbc89d46c8deac"},"package":"fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"}
+\ No newline at end of file
+diff --git a/third_party/rust/unicode-xid-0.1.0/COPYRIGHT b/third_party/rust/unicode-xid-0.1.0/COPYRIGHT
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/unicode-xid-0.1.0/COPYRIGHT
+@@ -0,0 +1,7 @@
++Licensed under the Apache License, Version 2.0
++<LICENSE-APACHE or
++http://www.apache.org/licenses/LICENSE-2.0> or the MIT
++license <LICENSE-MIT or http://opensource.org/licenses/MIT>,
++at your option. All files in the project carrying such
++notice may not be copied, modified, or distributed except
++according to those terms.
+diff --git a/third_party/rust/unicode-xid-0.1.0/Cargo.toml b/third_party/rust/unicode-xid-0.1.0/Cargo.toml
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/unicode-xid-0.1.0/Cargo.toml
+@@ -0,0 +1,26 @@
++[package]
++
++name = "unicode-xid"
++version = "0.1.0"
++authors = ["erick.tryzelaar <erick.tryzelaar@gmail.com>",
++ "kwantam <kwantam@gmail.com>",
++ ]
++
++homepage = "https://github.com/unicode-rs/unicode-xid"
++repository = "https://github.com/unicode-rs/unicode-xid"
++documentation = "https://unicode-rs.github.io/unicode-xid"
++license = "MIT/Apache-2.0"
++keywords = ["text", "unicode", "xid"]
++readme = "README.md"
++description = """
++Determine whether characters have the XID_Start
++or XID_Continue properties according to
++Unicode Standard Annex #31.
++"""
++
++exclude = [ "target/*", "Cargo.lock" ]
++
++[features]
++default = []
++no_std = []
++bench = []
+diff --git a/third_party/rust/unicode-xid-0.1.0/LICENSE-APACHE b/third_party/rust/unicode-xid-0.1.0/LICENSE-APACHE
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/unicode-xid-0.1.0/LICENSE-APACHE
+@@ -0,0 +1,201 @@
++ Apache License
++ Version 2.0, January 2004
++ http://www.apache.org/licenses/
++
++TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
++
++1. Definitions.
++
++ "License" shall mean the terms and conditions for use, reproduction,
++ and distribution as defined by Sections 1 through 9 of this document.
++
++ "Licensor" shall mean the copyright owner or entity authorized by
++ the copyright owner that is granting the License.
++
++ "Legal Entity" shall mean the union of the acting entity and all
++ other entities that control, are controlled by, or are under common
++ control with that entity. For the purposes of this definition,
++ "control" means (i) the power, direct or indirect, to cause the
++ direction or management of such entity, whether by contract or
++ otherwise, or (ii) ownership of fifty percent (50%) or more of the
++ outstanding shares, or (iii) beneficial ownership of such entity.
++
++ "You" (or "Your") shall mean an individual or Legal Entity
++ exercising permissions granted by this License.
++
++ "Source" form shall mean the preferred form for making modifications,
++ including but not limited to software source code, documentation
++ source, and configuration files.
++
++ "Object" form shall mean any form resulting from mechanical
++ transformation or translation of a Source form, including but
++ not limited to compiled object code, generated documentation,
++ and conversions to other media types.
++
++ "Work" shall mean the work of authorship, whether in Source or
++ Object form, made available under the License, as indicated by a
++ copyright notice that is included in or attached to the work
++ (an example is provided in the Appendix below).
++
++ "Derivative Works" shall mean any work, whether in Source or Object
++ form, that is based on (or derived from) the Work and for which the
++ editorial revisions, annotations, elaborations, or other modifications
++ represent, as a whole, an original work of authorship. For the purposes
++ of this License, Derivative Works shall not include works that remain
++ separable from, or merely link (or bind by name) to the interfaces of,
++ the Work and Derivative Works thereof.
++
++ "Contribution" shall mean any work of authorship, including
++ the original version of the Work and any modifications or additions
++ to that Work or Derivative Works thereof, that is intentionally
++ submitted to Licensor for inclusion in the Work by the copyright owner
++ or by an individual or Legal Entity authorized to submit on behalf of
++ the copyright owner. For the purposes of this definition, "submitted"
++ means any form of electronic, verbal, or written communication sent
++ to the Licensor or its representatives, including but not limited to
++ communication on electronic mailing lists, source code control systems,
++ and issue tracking systems that are managed by, or on behalf of, the
++ Licensor for the purpose of discussing and improving the Work, but
++ excluding communication that is conspicuously marked or otherwise
++ designated in writing by the copyright owner as "Not a Contribution."
++
++ "Contributor" shall mean Licensor and any individual or Legal Entity
++ on behalf of whom a Contribution has been received by Licensor and
++ subsequently incorporated within the Work.
++
++2. Grant of Copyright License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ copyright license to reproduce, prepare Derivative Works of,
++ publicly display, publicly perform, sublicense, and distribute the
++ Work and such Derivative Works in Source or Object form.
++
++3. Grant of Patent License. Subject to the terms and conditions of
++ this License, each Contributor hereby grants to You a perpetual,
++ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
++ (except as stated in this section) patent license to make, have made,
++ use, offer to sell, sell, import, and otherwise transfer the Work,
++ where such license applies only to those patent claims licensable
++ by such Contributor that are necessarily infringed by their
++ Contribution(s) alone or by combination of their Contribution(s)
++ with the Work to which such Contribution(s) was submitted. If You
++ institute patent litigation against any entity (including a
++ cross-claim or counterclaim in a lawsuit) alleging that the Work
++ or a Contribution incorporated within the Work constitutes direct
++ or contributory patent infringement, then any patent licenses
++ granted to You under this License for that Work shall terminate
++ as of the date such litigation is filed.
++
++4. Redistribution. You may reproduce and distribute copies of the
++ Work or Derivative Works thereof in any medium, with or without
++ modifications, and in Source or Object form, provided that You
++ meet the following conditions:
++
++ (a) You must give any other recipients of the Work or
++ Derivative Works a copy of this License; and
++
++ (b) You must cause any modified files to carry prominent notices
++ stating that You changed the files; and
++
++ (c) You must retain, in the Source form of any Derivative Works
++ that You distribute, all copyright, patent, trademark, and
++ attribution notices from the Source form of the Work,
++ excluding those notices that do not pertain to any part of
++ the Derivative Works; and
++
++ (d) If the Work includes a "NOTICE" text file as part of its
++ distribution, then any Derivative Works that You distribute must
++ include a readable copy of the attribution notices contained
++ within such NOTICE file, excluding those notices that do not
++ pertain to any part of the Derivative Works, in at least one
++ of the following places: within a NOTICE text file distributed
++ as part of the Derivative Works; within the Source form or
++ documentation, if provided along with the Derivative Works; or,
++ within a display generated by the Derivative Works, if and
++ wherever such third-party notices normally appear. The contents
++ of the NOTICE file are for informational purposes only and
++ do not modify the License. You may add Your own attribution
++ notices within Derivative Works that You distribute, alongside
++ or as an addendum to the NOTICE text from the Work, provided
++ that such additional attribution notices cannot be construed
++ as modifying the License.
++
++ You may add Your own copyright statement to Your modifications and
++ may provide additional or different license terms and conditions
++ for use, reproduction, or distribution of Your modifications, or
++ for any such Derivative Works as a whole, provided Your use,
++ reproduction, and distribution of the Work otherwise complies with
++ the conditions stated in this License.
++
++5. Submission of Contributions. Unless You explicitly state otherwise,
++ any Contribution intentionally submitted for inclusion in the Work
++ by You to the Licensor shall be under the terms and conditions of
++ this License, without any additional terms or conditions.
++ Notwithstanding the above, nothing herein shall supersede or modify
++ the terms of any separate license agreement you may have executed
++ with Licensor regarding such Contributions.
++
++6. Trademarks. This License does not grant permission to use the trade
++ names, trademarks, service marks, or product names of the Licensor,
++ except as required for reasonable and customary use in describing the
++ origin of the Work and reproducing the content of the NOTICE file.
++
++7. Disclaimer of Warranty. Unless required by applicable law or
++ agreed to in writing, Licensor provides the Work (and each
++ Contributor provides its Contributions) on an "AS IS" BASIS,
++ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
++ implied, including, without limitation, any warranties or conditions
++ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
++ PARTICULAR PURPOSE. You are solely responsible for determining the
++ appropriateness of using or redistributing the Work and assume any
++ risks associated with Your exercise of permissions under this License.
++
++8. Limitation of Liability. In no event and under no legal theory,
++ whether in tort (including negligence), contract, or otherwise,
++ unless required by applicable law (such as deliberate and grossly
++ negligent acts) or agreed to in writing, shall any Contributor be
++ liable to You for damages, including any direct, indirect, special,
++ incidental, or consequential damages of any character arising as a
++ result of this License or out of the use or inability to use the
++ Work (including but not limited to damages for loss of goodwill,
++ work stoppage, computer failure or malfunction, or any and all
++ other commercial damages or losses), even if such Contributor
++ has been advised of the possibility of such damages.
++
++9. Accepting Warranty or Additional Liability. While redistributing
++ the Work or Derivative Works thereof, You may choose to offer,
++ and charge a fee for, acceptance of support, warranty, indemnity,
++ or other liability obligations and/or rights consistent with this
++ License. However, in accepting such obligations, You may act only
++ on Your own behalf and on Your sole responsibility, not on behalf
++ of any other Contributor, and only if You agree to indemnify,
++ defend, and hold each Contributor harmless for any liability
++ incurred by, or claims asserted against, such Contributor by reason
++ of your accepting any such warranty or additional liability.
++
++END OF TERMS AND CONDITIONS
++
++APPENDIX: How to apply the Apache License to your work.
++
++ To apply the Apache License to your work, attach the following
++ boilerplate notice, with the fields enclosed by brackets "[]"
++ replaced with your own identifying information. (Don't include
++ the brackets!) The text should be enclosed in the appropriate
++ comment syntax for the file format. We also recommend that a
++ file or class name and description of purpose be included on the
++ same "printed page" as the copyright notice for easier
++ identification within third-party archives.
++
++Copyright [yyyy] [name of copyright owner]
++
++Licensed under the Apache License, Version 2.0 (the "License");
++you may not use this file except in compliance with the License.
++You may obtain a copy of the License at
++
++ http://www.apache.org/licenses/LICENSE-2.0
++
++Unless required by applicable law or agreed to in writing, software
++distributed under the License is distributed on an "AS IS" BASIS,
++WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
++See the License for the specific language governing permissions and
++limitations under the License.
+diff --git a/third_party/rust/unicode-xid-0.1.0/LICENSE-MIT b/third_party/rust/unicode-xid-0.1.0/LICENSE-MIT
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/unicode-xid-0.1.0/LICENSE-MIT
+@@ -0,0 +1,25 @@
++Copyright (c) 2015 The Rust Project Developers
++
++Permission is hereby granted, free of charge, to any
++person obtaining a copy of this software and associated
++documentation files (the "Software"), to deal in the
++Software without restriction, including without
++limitation the rights to use, copy, modify, merge,
++publish, distribute, sublicense, and/or sell copies of
++the Software, and to permit persons to whom the Software
++is furnished to do so, subject to the following
++conditions:
++
++The above copyright notice and this permission notice
++shall be included in all copies or substantial portions
++of the Software.
++
++THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
++ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
++TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
++PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
++SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
++CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
++OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
++IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
++DEALINGS IN THE SOFTWARE.
+diff --git a/third_party/rust/unicode-xid-0.1.0/README.md b/third_party/rust/unicode-xid-0.1.0/README.md
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/unicode-xid-0.1.0/README.md
+@@ -0,0 +1,34 @@
++# unicode-xid
++
++Determine if a `char` is a valid identifier for a parser and/or lexer according to
++[Unicode Standard Annex #31](http://www.unicode.org/reports/tr31/) rules.
++
++[![Build Status](https://travis-ci.org/unicode-rs/unicode-xid.svg)](https://travis-ci.org/unicode-rs/unicode-xid)
++
++[Documentation](https://unicode-rs.github.io/unicode-xid/unicode_xid/index.html)
++
++```rust
++extern crate unicode_xid;
++
++use unicode_xid::UnicodeXID;
++
++fn main() {
++ let ch = 'a';
++ println!("Is {} a valid start of an identifier? {}", ch, UnicodeXID::is_xid_start(ch));
++}
++```
++
++# features
++
++unicode-xid supports a `no_std` feature. This eliminates dependence
++on std, and instead uses equivalent functions from core.
++
++# crates.io
++
++You can use this package in your project by adding the following
++to your `Cargo.toml`:
++
++```toml
++[dependencies]
++unicode-xid = "0.0.4"
++```
+diff --git a/third_party/rust/unicode-xid/scripts/unicode.py b/third_party/rust/unicode-xid-0.1.0/scripts/unicode.py
+rename from third_party/rust/unicode-xid/scripts/unicode.py
+rename to third_party/rust/unicode-xid-0.1.0/scripts/unicode.py
+diff --git a/third_party/rust/unicode-xid-0.1.0/src/lib.rs b/third_party/rust/unicode-xid-0.1.0/src/lib.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/unicode-xid-0.1.0/src/lib.rs
+@@ -0,0 +1,87 @@
++// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution and at
++// http://rust-lang.org/COPYRIGHT.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++//! Determine if a `char` is a valid identifier for a parser and/or lexer according to
++//! [Unicode Standard Annex #31](http://www.unicode.org/reports/tr31/) rules.
++//!
++//! ```rust
++//! extern crate unicode_xid;
++//!
++//! use unicode_xid::UnicodeXID;
++//!
++//! fn main() {
++//! let ch = 'a';
++//! println!("Is {} a valid start of an identifier? {}", ch, UnicodeXID::is_xid_start(ch));
++//! }
++//! ```
++//!
++//! # features
++//!
++//! unicode-xid supports a `no_std` feature. This eliminates dependence
++//! on std, and instead uses equivalent functions from core.
++//!
++//! # crates.io
++//!
++//! You can use this package in your project by adding the following
++//! to your `Cargo.toml`:
++//!
++//! ```toml
++//! [dependencies]
++//! unicode-xid = "0.0.4"
++//! ```
++
++#![deny(missing_docs, unsafe_code)]
++#![doc(html_logo_url = "https://unicode-rs.github.io/unicode-rs_sm.png",
++ html_favicon_url = "https://unicode-rs.github.io/unicode-rs_sm.png")]
++
++#![no_std]
++#![cfg_attr(feature = "bench", feature(test, unicode))]
++
++#[cfg(test)]
++#[macro_use]
++extern crate std;
++
++#[cfg(feature = "bench")]
++extern crate test;
++
++use tables::derived_property;
++pub use tables::UNICODE_VERSION;
++
++mod tables;
++
++#[cfg(test)]
++mod tests;
++
++/// Methods for determining if a character is a valid identifier character.
++pub trait UnicodeXID {
++ /// Returns whether the specified character satisfies the 'XID_Start'
++ /// Unicode property.
++ ///
++ /// 'XID_Start' is a Unicode Derived Property specified in
++ /// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
++ /// mostly similar to ID_Start but modified for closure under NFKx.
++ fn is_xid_start(self) -> bool;
++
++ /// Returns whether the specified `char` satisfies the 'XID_Continue'
++ /// Unicode property.
++ ///
++ /// 'XID_Continue' is a Unicode Derived Property specified in
++ /// [UAX #31](http://unicode.org/reports/tr31/#NFKC_Modifications),
++ /// mostly similar to 'ID_Continue' but modified for closure under NFKx.
++ fn is_xid_continue(self) -> bool;
++}
++
++impl UnicodeXID for char {
++ #[inline]
++ fn is_xid_start(self) -> bool { derived_property::XID_Start(self) }
++
++ #[inline]
++ fn is_xid_continue(self) -> bool { derived_property::XID_Continue(self) }
++}
+diff --git a/third_party/rust/unicode-xid-0.1.0/src/tables.rs b/third_party/rust/unicode-xid-0.1.0/src/tables.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/unicode-xid-0.1.0/src/tables.rs
+@@ -0,0 +1,426 @@
++// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution and at
++// http://rust-lang.org/COPYRIGHT.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++// NOTE: The following code was generated by "scripts/unicode.py", do not edit directly
++
++#![allow(missing_docs, non_upper_case_globals, non_snake_case)]
++
++/// The version of [Unicode](http://www.unicode.org/)
++/// that this version of unicode-xid is based on.
++pub const UNICODE_VERSION: (u64, u64, u64) = (9, 0, 0);
++
++fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool {
++ use core::cmp::Ordering::{Equal, Less, Greater};
++
++ r.binary_search_by(|&(lo,hi)| {
++ if lo <= c && c <= hi { Equal }
++ else if hi < c { Less }
++ else { Greater }
++ }).is_ok()
++}
++
++pub mod derived_property {
++ pub const XID_Continue_table: &'static [(char, char)] = &[
++ ('\u{30}', '\u{39}'), ('\u{41}', '\u{5a}'), ('\u{5f}', '\u{5f}'), ('\u{61}', '\u{7a}'),
++ ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), ('\u{b7}', '\u{b7}'), ('\u{ba}', '\u{ba}'),
++ ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'), ('\u{2c6}', '\u{2d1}'),
++ ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}', '\u{2ee}'), ('\u{300}',
++ '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'), ('\u{37f}', '\u{37f}'),
++ ('\u{386}', '\u{38a}'), ('\u{38c}', '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}',
++ '\u{3f5}'), ('\u{3f7}', '\u{481}'), ('\u{483}', '\u{487}'), ('\u{48a}', '\u{52f}'),
++ ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{561}', '\u{587}'), ('\u{591}',
++ '\u{5bd}'), ('\u{5bf}', '\u{5bf}'), ('\u{5c1}', '\u{5c2}'), ('\u{5c4}', '\u{5c5}'),
++ ('\u{5c7}', '\u{5c7}'), ('\u{5d0}', '\u{5ea}'), ('\u{5f0}', '\u{5f2}'), ('\u{610}',
++ '\u{61a}'), ('\u{620}', '\u{669}'), ('\u{66e}', '\u{6d3}'), ('\u{6d5}', '\u{6dc}'),
++ ('\u{6df}', '\u{6e8}'), ('\u{6ea}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'), ('\u{710}',
++ '\u{74a}'), ('\u{74d}', '\u{7b1}'), ('\u{7c0}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'),
++ ('\u{800}', '\u{82d}'), ('\u{840}', '\u{85b}'), ('\u{8a0}', '\u{8b4}'), ('\u{8b6}',
++ '\u{8bd}'), ('\u{8d4}', '\u{8e1}'), ('\u{8e3}', '\u{963}'), ('\u{966}', '\u{96f}'),
++ ('\u{971}', '\u{983}'), ('\u{985}', '\u{98c}'), ('\u{98f}', '\u{990}'), ('\u{993}',
++ '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), ('\u{9b2}', '\u{9b2}'), ('\u{9b6}', '\u{9b9}'),
++ ('\u{9bc}', '\u{9c4}'), ('\u{9c7}', '\u{9c8}'), ('\u{9cb}', '\u{9ce}'), ('\u{9d7}',
++ '\u{9d7}'), ('\u{9dc}', '\u{9dd}'), ('\u{9df}', '\u{9e3}'), ('\u{9e6}', '\u{9f1}'),
++ ('\u{a01}', '\u{a03}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}', '\u{a10}'), ('\u{a13}',
++ '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'), ('\u{a35}', '\u{a36}'),
++ ('\u{a38}', '\u{a39}'), ('\u{a3c}', '\u{a3c}'), ('\u{a3e}', '\u{a42}'), ('\u{a47}',
++ '\u{a48}'), ('\u{a4b}', '\u{a4d}'), ('\u{a51}', '\u{a51}'), ('\u{a59}', '\u{a5c}'),
++ ('\u{a5e}', '\u{a5e}'), ('\u{a66}', '\u{a75}'), ('\u{a81}', '\u{a83}'), ('\u{a85}',
++ '\u{a8d}'), ('\u{a8f}', '\u{a91}'), ('\u{a93}', '\u{aa8}'), ('\u{aaa}', '\u{ab0}'),
++ ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', '\u{ab9}'), ('\u{abc}', '\u{ac5}'), ('\u{ac7}',
++ '\u{ac9}'), ('\u{acb}', '\u{acd}'), ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', '\u{ae3}'),
++ ('\u{ae6}', '\u{aef}'), ('\u{af9}', '\u{af9}'), ('\u{b01}', '\u{b03}'), ('\u{b05}',
++ '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}', '\u{b28}'), ('\u{b2a}', '\u{b30}'),
++ ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'), ('\u{b3c}', '\u{b44}'), ('\u{b47}',
++ '\u{b48}'), ('\u{b4b}', '\u{b4d}'), ('\u{b56}', '\u{b57}'), ('\u{b5c}', '\u{b5d}'),
++ ('\u{b5f}', '\u{b63}'), ('\u{b66}', '\u{b6f}'), ('\u{b71}', '\u{b71}'), ('\u{b82}',
++ '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'), ('\u{b92}', '\u{b95}'),
++ ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}', '\u{b9f}'), ('\u{ba3}',
++ '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'), ('\u{bbe}', '\u{bc2}'),
++ ('\u{bc6}', '\u{bc8}'), ('\u{bca}', '\u{bcd}'), ('\u{bd0}', '\u{bd0}'), ('\u{bd7}',
++ '\u{bd7}'), ('\u{be6}', '\u{bef}'), ('\u{c00}', '\u{c03}'), ('\u{c05}', '\u{c0c}'),
++ ('\u{c0e}', '\u{c10}'), ('\u{c12}', '\u{c28}'), ('\u{c2a}', '\u{c39}'), ('\u{c3d}',
++ '\u{c44}'), ('\u{c46}', '\u{c48}'), ('\u{c4a}', '\u{c4d}'), ('\u{c55}', '\u{c56}'),
++ ('\u{c58}', '\u{c5a}'), ('\u{c60}', '\u{c63}'), ('\u{c66}', '\u{c6f}'), ('\u{c80}',
++ '\u{c83}'), ('\u{c85}', '\u{c8c}'), ('\u{c8e}', '\u{c90}'), ('\u{c92}', '\u{ca8}'),
++ ('\u{caa}', '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), ('\u{cbc}', '\u{cc4}'), ('\u{cc6}',
++ '\u{cc8}'), ('\u{cca}', '\u{ccd}'), ('\u{cd5}', '\u{cd6}'), ('\u{cde}', '\u{cde}'),
++ ('\u{ce0}', '\u{ce3}'), ('\u{ce6}', '\u{cef}'), ('\u{cf1}', '\u{cf2}'), ('\u{d01}',
++ '\u{d03}'), ('\u{d05}', '\u{d0c}'), ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d3a}'),
++ ('\u{d3d}', '\u{d44}'), ('\u{d46}', '\u{d48}'), ('\u{d4a}', '\u{d4e}'), ('\u{d54}',
++ '\u{d57}'), ('\u{d5f}', '\u{d63}'), ('\u{d66}', '\u{d6f}'), ('\u{d7a}', '\u{d7f}'),
++ ('\u{d82}', '\u{d83}'), ('\u{d85}', '\u{d96}'), ('\u{d9a}', '\u{db1}'), ('\u{db3}',
++ '\u{dbb}'), ('\u{dbd}', '\u{dbd}'), ('\u{dc0}', '\u{dc6}'), ('\u{dca}', '\u{dca}'),
++ ('\u{dcf}', '\u{dd4}'), ('\u{dd6}', '\u{dd6}'), ('\u{dd8}', '\u{ddf}'), ('\u{de6}',
++ '\u{def}'), ('\u{df2}', '\u{df3}'), ('\u{e01}', '\u{e3a}'), ('\u{e40}', '\u{e4e}'),
++ ('\u{e50}', '\u{e59}'), ('\u{e81}', '\u{e82}'), ('\u{e84}', '\u{e84}'), ('\u{e87}',
++ '\u{e88}'), ('\u{e8a}', '\u{e8a}'), ('\u{e8d}', '\u{e8d}'), ('\u{e94}', '\u{e97}'),
++ ('\u{e99}', '\u{e9f}'), ('\u{ea1}', '\u{ea3}'), ('\u{ea5}', '\u{ea5}'), ('\u{ea7}',
++ '\u{ea7}'), ('\u{eaa}', '\u{eab}'), ('\u{ead}', '\u{eb9}'), ('\u{ebb}', '\u{ebd}'),
++ ('\u{ec0}', '\u{ec4}'), ('\u{ec6}', '\u{ec6}'), ('\u{ec8}', '\u{ecd}'), ('\u{ed0}',
++ '\u{ed9}'), ('\u{edc}', '\u{edf}'), ('\u{f00}', '\u{f00}'), ('\u{f18}', '\u{f19}'),
++ ('\u{f20}', '\u{f29}'), ('\u{f35}', '\u{f35}'), ('\u{f37}', '\u{f37}'), ('\u{f39}',
++ '\u{f39}'), ('\u{f3e}', '\u{f47}'), ('\u{f49}', '\u{f6c}'), ('\u{f71}', '\u{f84}'),
++ ('\u{f86}', '\u{f97}'), ('\u{f99}', '\u{fbc}'), ('\u{fc6}', '\u{fc6}'), ('\u{1000}',
++ '\u{1049}'), ('\u{1050}', '\u{109d}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'),
++ ('\u{10cd}', '\u{10cd}'), ('\u{10d0}', '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}',
++ '\u{124d}'), ('\u{1250}', '\u{1256}'), ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'),
++ ('\u{1260}', '\u{1288}'), ('\u{128a}', '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}',
++ '\u{12b5}'), ('\u{12b8}', '\u{12be}'), ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'),
++ ('\u{12c8}', '\u{12d6}'), ('\u{12d8}', '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}',
++ '\u{135a}'), ('\u{135d}', '\u{135f}'), ('\u{1369}', '\u{1371}'), ('\u{1380}', '\u{138f}'),
++ ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), ('\u{1401}', '\u{166c}'), ('\u{166f}',
++ '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', '\u{16ea}'), ('\u{16ee}', '\u{16f8}'),
++ ('\u{1700}', '\u{170c}'), ('\u{170e}', '\u{1714}'), ('\u{1720}', '\u{1734}'), ('\u{1740}',
++ '\u{1753}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', '\u{1770}'), ('\u{1772}', '\u{1773}'),
++ ('\u{1780}', '\u{17d3}'), ('\u{17d7}', '\u{17d7}'), ('\u{17dc}', '\u{17dd}'), ('\u{17e0}',
++ '\u{17e9}'), ('\u{180b}', '\u{180d}'), ('\u{1810}', '\u{1819}'), ('\u{1820}', '\u{1877}'),
++ ('\u{1880}', '\u{18aa}'), ('\u{18b0}', '\u{18f5}'), ('\u{1900}', '\u{191e}'), ('\u{1920}',
++ '\u{192b}'), ('\u{1930}', '\u{193b}'), ('\u{1946}', '\u{196d}'), ('\u{1970}', '\u{1974}'),
++ ('\u{1980}', '\u{19ab}'), ('\u{19b0}', '\u{19c9}'), ('\u{19d0}', '\u{19da}'), ('\u{1a00}',
++ '\u{1a1b}'), ('\u{1a20}', '\u{1a5e}'), ('\u{1a60}', '\u{1a7c}'), ('\u{1a7f}', '\u{1a89}'),
++ ('\u{1a90}', '\u{1a99}'), ('\u{1aa7}', '\u{1aa7}'), ('\u{1ab0}', '\u{1abd}'), ('\u{1b00}',
++ '\u{1b4b}'), ('\u{1b50}', '\u{1b59}'), ('\u{1b6b}', '\u{1b73}'), ('\u{1b80}', '\u{1bf3}'),
++ ('\u{1c00}', '\u{1c37}'), ('\u{1c40}', '\u{1c49}'), ('\u{1c4d}', '\u{1c7d}'), ('\u{1c80}',
++ '\u{1c88}'), ('\u{1cd0}', '\u{1cd2}'), ('\u{1cd4}', '\u{1cf6}'), ('\u{1cf8}', '\u{1cf9}'),
++ ('\u{1d00}', '\u{1df5}'), ('\u{1dfb}', '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}',
++ '\u{1f45}'), ('\u{1f48}', '\u{1f4d}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'),
++ ('\u{1f5b}', '\u{1f5b}'), ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}',
++ '\u{1fb4}'), ('\u{1fb6}', '\u{1fbc}'), ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'),
++ ('\u{1fc6}', '\u{1fcc}'), ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}',
++ '\u{1fec}'), ('\u{1ff2}', '\u{1ff4}'), ('\u{1ff6}', '\u{1ffc}'), ('\u{203f}', '\u{2040}'),
++ ('\u{2054}', '\u{2054}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}',
++ '\u{209c}'), ('\u{20d0}', '\u{20dc}'), ('\u{20e1}', '\u{20e1}'), ('\u{20e5}', '\u{20f0}'),
++ ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}', '\u{2113}'), ('\u{2115}',
++ '\u{2115}'), ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'), ('\u{2126}', '\u{2126}'),
++ ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}', '\u{213f}'), ('\u{2145}',
++ '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'), ('\u{2c00}', '\u{2c2e}'),
++ ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', '\u{2cf3}'), ('\u{2d00}',
++ '\u{2d25}'), ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'), ('\u{2d30}', '\u{2d67}'),
++ ('\u{2d6f}', '\u{2d6f}'), ('\u{2d7f}', '\u{2d96}'), ('\u{2da0}', '\u{2da6}'), ('\u{2da8}',
++ '\u{2dae}'), ('\u{2db0}', '\u{2db6}'), ('\u{2db8}', '\u{2dbe}'), ('\u{2dc0}', '\u{2dc6}'),
++ ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}', '\u{2dd6}'), ('\u{2dd8}', '\u{2dde}'), ('\u{2de0}',
++ '\u{2dff}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{302f}'), ('\u{3031}', '\u{3035}'),
++ ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{3099}', '\u{309a}'), ('\u{309d}',
++ '\u{309f}'), ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312d}'),
++ ('\u{3131}', '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}',
++ '\u{4db5}'), ('\u{4e00}', '\u{9fd5}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'),
++ ('\u{a500}', '\u{a60c}'), ('\u{a610}', '\u{a62b}'), ('\u{a640}', '\u{a66f}'), ('\u{a674}',
++ '\u{a67d}'), ('\u{a67f}', '\u{a6f1}'), ('\u{a717}', '\u{a71f}'), ('\u{a722}', '\u{a788}'),
++ ('\u{a78b}', '\u{a7ae}'), ('\u{a7b0}', '\u{a7b7}'), ('\u{a7f7}', '\u{a827}'), ('\u{a840}',
++ '\u{a873}'), ('\u{a880}', '\u{a8c5}'), ('\u{a8d0}', '\u{a8d9}'), ('\u{a8e0}', '\u{a8f7}'),
++ ('\u{a8fb}', '\u{a8fb}'), ('\u{a8fd}', '\u{a8fd}'), ('\u{a900}', '\u{a92d}'), ('\u{a930}',
++ '\u{a953}'), ('\u{a960}', '\u{a97c}'), ('\u{a980}', '\u{a9c0}'), ('\u{a9cf}', '\u{a9d9}'),
++ ('\u{a9e0}', '\u{a9fe}'), ('\u{aa00}', '\u{aa36}'), ('\u{aa40}', '\u{aa4d}'), ('\u{aa50}',
++ '\u{aa59}'), ('\u{aa60}', '\u{aa76}'), ('\u{aa7a}', '\u{aac2}'), ('\u{aadb}', '\u{aadd}'),
++ ('\u{aae0}', '\u{aaef}'), ('\u{aaf2}', '\u{aaf6}'), ('\u{ab01}', '\u{ab06}'), ('\u{ab09}',
++ '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), ('\u{ab28}', '\u{ab2e}'),
++ ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', '\u{ab65}'), ('\u{ab70}', '\u{abea}'), ('\u{abec}',
++ '\u{abed}'), ('\u{abf0}', '\u{abf9}'), ('\u{ac00}', '\u{d7a3}'), ('\u{d7b0}', '\u{d7c6}'),
++ ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}', '\u{fa6d}'), ('\u{fa70}', '\u{fad9}'), ('\u{fb00}',
++ '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), ('\u{fb1d}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'),
++ ('\u{fb38}', '\u{fb3c}'), ('\u{fb3e}', '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}',
++ '\u{fb44}'), ('\u{fb46}', '\u{fbb1}'), ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'),
++ ('\u{fd50}', '\u{fd8f}'), ('\u{fd92}', '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe00}',
++ '\u{fe0f}'), ('\u{fe20}', '\u{fe2f}'), ('\u{fe33}', '\u{fe34}'), ('\u{fe4d}', '\u{fe4f}'),
++ ('\u{fe71}', '\u{fe71}'), ('\u{fe73}', '\u{fe73}'), ('\u{fe77}', '\u{fe77}'), ('\u{fe79}',
++ '\u{fe79}'), ('\u{fe7b}', '\u{fe7b}'), ('\u{fe7d}', '\u{fe7d}'), ('\u{fe7f}', '\u{fefc}'),
++ ('\u{ff10}', '\u{ff19}'), ('\u{ff21}', '\u{ff3a}'), ('\u{ff3f}', '\u{ff3f}'), ('\u{ff41}',
++ '\u{ff5a}'), ('\u{ff66}', '\u{ffbe}'), ('\u{ffc2}', '\u{ffc7}'), ('\u{ffca}', '\u{ffcf}'),
++ ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'), ('\u{10000}', '\u{1000b}'),
++ ('\u{1000d}', '\u{10026}'), ('\u{10028}', '\u{1003a}'), ('\u{1003c}', '\u{1003d}'),
++ ('\u{1003f}', '\u{1004d}'), ('\u{10050}', '\u{1005d}'), ('\u{10080}', '\u{100fa}'),
++ ('\u{10140}', '\u{10174}'), ('\u{101fd}', '\u{101fd}'), ('\u{10280}', '\u{1029c}'),
++ ('\u{102a0}', '\u{102d0}'), ('\u{102e0}', '\u{102e0}'), ('\u{10300}', '\u{1031f}'),
++ ('\u{10330}', '\u{1034a}'), ('\u{10350}', '\u{1037a}'), ('\u{10380}', '\u{1039d}'),
++ ('\u{103a0}', '\u{103c3}'), ('\u{103c8}', '\u{103cf}'), ('\u{103d1}', '\u{103d5}'),
++ ('\u{10400}', '\u{1049d}'), ('\u{104a0}', '\u{104a9}'), ('\u{104b0}', '\u{104d3}'),
++ ('\u{104d8}', '\u{104fb}'), ('\u{10500}', '\u{10527}'), ('\u{10530}', '\u{10563}'),
++ ('\u{10600}', '\u{10736}'), ('\u{10740}', '\u{10755}'), ('\u{10760}', '\u{10767}'),
++ ('\u{10800}', '\u{10805}'), ('\u{10808}', '\u{10808}'), ('\u{1080a}', '\u{10835}'),
++ ('\u{10837}', '\u{10838}'), ('\u{1083c}', '\u{1083c}'), ('\u{1083f}', '\u{10855}'),
++ ('\u{10860}', '\u{10876}'), ('\u{10880}', '\u{1089e}'), ('\u{108e0}', '\u{108f2}'),
++ ('\u{108f4}', '\u{108f5}'), ('\u{10900}', '\u{10915}'), ('\u{10920}', '\u{10939}'),
++ ('\u{10980}', '\u{109b7}'), ('\u{109be}', '\u{109bf}'), ('\u{10a00}', '\u{10a03}'),
++ ('\u{10a05}', '\u{10a06}'), ('\u{10a0c}', '\u{10a13}'), ('\u{10a15}', '\u{10a17}'),
++ ('\u{10a19}', '\u{10a33}'), ('\u{10a38}', '\u{10a3a}'), ('\u{10a3f}', '\u{10a3f}'),
++ ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}', '\u{10a9c}'), ('\u{10ac0}', '\u{10ac7}'),
++ ('\u{10ac9}', '\u{10ae6}'), ('\u{10b00}', '\u{10b35}'), ('\u{10b40}', '\u{10b55}'),
++ ('\u{10b60}', '\u{10b72}'), ('\u{10b80}', '\u{10b91}'), ('\u{10c00}', '\u{10c48}'),
++ ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'), ('\u{11000}', '\u{11046}'),
++ ('\u{11066}', '\u{1106f}'), ('\u{1107f}', '\u{110ba}'), ('\u{110d0}', '\u{110e8}'),
++ ('\u{110f0}', '\u{110f9}'), ('\u{11100}', '\u{11134}'), ('\u{11136}', '\u{1113f}'),
++ ('\u{11150}', '\u{11173}'), ('\u{11176}', '\u{11176}'), ('\u{11180}', '\u{111c4}'),
++ ('\u{111ca}', '\u{111cc}'), ('\u{111d0}', '\u{111da}'), ('\u{111dc}', '\u{111dc}'),
++ ('\u{11200}', '\u{11211}'), ('\u{11213}', '\u{11237}'), ('\u{1123e}', '\u{1123e}'),
++ ('\u{11280}', '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'),
++ ('\u{1128f}', '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112ea}'),
++ ('\u{112f0}', '\u{112f9}'), ('\u{11300}', '\u{11303}'), ('\u{11305}', '\u{1130c}'),
++ ('\u{1130f}', '\u{11310}'), ('\u{11313}', '\u{11328}'), ('\u{1132a}', '\u{11330}'),
++ ('\u{11332}', '\u{11333}'), ('\u{11335}', '\u{11339}'), ('\u{1133c}', '\u{11344}'),
++ ('\u{11347}', '\u{11348}'), ('\u{1134b}', '\u{1134d}'), ('\u{11350}', '\u{11350}'),
++ ('\u{11357}', '\u{11357}'), ('\u{1135d}', '\u{11363}'), ('\u{11366}', '\u{1136c}'),
++ ('\u{11370}', '\u{11374}'), ('\u{11400}', '\u{1144a}'), ('\u{11450}', '\u{11459}'),
++ ('\u{11480}', '\u{114c5}'), ('\u{114c7}', '\u{114c7}'), ('\u{114d0}', '\u{114d9}'),
++ ('\u{11580}', '\u{115b5}'), ('\u{115b8}', '\u{115c0}'), ('\u{115d8}', '\u{115dd}'),
++ ('\u{11600}', '\u{11640}'), ('\u{11644}', '\u{11644}'), ('\u{11650}', '\u{11659}'),
++ ('\u{11680}', '\u{116b7}'), ('\u{116c0}', '\u{116c9}'), ('\u{11700}', '\u{11719}'),
++ ('\u{1171d}', '\u{1172b}'), ('\u{11730}', '\u{11739}'), ('\u{118a0}', '\u{118e9}'),
++ ('\u{118ff}', '\u{118ff}'), ('\u{11ac0}', '\u{11af8}'), ('\u{11c00}', '\u{11c08}'),
++ ('\u{11c0a}', '\u{11c36}'), ('\u{11c38}', '\u{11c40}'), ('\u{11c50}', '\u{11c59}'),
++ ('\u{11c72}', '\u{11c8f}'), ('\u{11c92}', '\u{11ca7}'), ('\u{11ca9}', '\u{11cb6}'),
++ ('\u{12000}', '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', '\u{12543}'),
++ ('\u{13000}', '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', '\u{16a38}'),
++ ('\u{16a40}', '\u{16a5e}'), ('\u{16a60}', '\u{16a69}'), ('\u{16ad0}', '\u{16aed}'),
++ ('\u{16af0}', '\u{16af4}'), ('\u{16b00}', '\u{16b36}'), ('\u{16b40}', '\u{16b43}'),
++ ('\u{16b50}', '\u{16b59}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', '\u{16b8f}'),
++ ('\u{16f00}', '\u{16f44}'), ('\u{16f50}', '\u{16f7e}'), ('\u{16f8f}', '\u{16f9f}'),
++ ('\u{16fe0}', '\u{16fe0}'), ('\u{17000}', '\u{187ec}'), ('\u{18800}', '\u{18af2}'),
++ ('\u{1b000}', '\u{1b001}'), ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'),
++ ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}', '\u{1bc99}'), ('\u{1bc9d}', '\u{1bc9e}'),
++ ('\u{1d165}', '\u{1d169}'), ('\u{1d16d}', '\u{1d172}'), ('\u{1d17b}', '\u{1d182}'),
++ ('\u{1d185}', '\u{1d18b}'), ('\u{1d1aa}', '\u{1d1ad}'), ('\u{1d242}', '\u{1d244}'),
++ ('\u{1d400}', '\u{1d454}'), ('\u{1d456}', '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'),
++ ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'),
++ ('\u{1d4ae}', '\u{1d4b9}'), ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'),
++ ('\u{1d4c5}', '\u{1d505}'), ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'),
++ ('\u{1d516}', '\u{1d51c}'), ('\u{1d51e}', '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'),
++ ('\u{1d540}', '\u{1d544}'), ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'),
++ ('\u{1d552}', '\u{1d6a5}'), ('\u{1d6a8}', '\u{1d6c0}'), ('\u{1d6c2}', '\u{1d6da}'),
++ ('\u{1d6dc}', '\u{1d6fa}'), ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', '\u{1d734}'),
++ ('\u{1d736}', '\u{1d74e}'), ('\u{1d750}', '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'),
++ ('\u{1d78a}', '\u{1d7a8}'), ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'),
++ ('\u{1d7ce}', '\u{1d7ff}'), ('\u{1da00}', '\u{1da36}'), ('\u{1da3b}', '\u{1da6c}'),
++ ('\u{1da75}', '\u{1da75}'), ('\u{1da84}', '\u{1da84}'), ('\u{1da9b}', '\u{1da9f}'),
++ ('\u{1daa1}', '\u{1daaf}'), ('\u{1e000}', '\u{1e006}'), ('\u{1e008}', '\u{1e018}'),
++ ('\u{1e01b}', '\u{1e021}'), ('\u{1e023}', '\u{1e024}'), ('\u{1e026}', '\u{1e02a}'),
++ ('\u{1e800}', '\u{1e8c4}'), ('\u{1e8d0}', '\u{1e8d6}'), ('\u{1e900}', '\u{1e94a}'),
++ ('\u{1e950}', '\u{1e959}'), ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}', '\u{1ee1f}'),
++ ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}', '\u{1ee27}'),
++ ('\u{1ee29}', '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}', '\u{1ee39}'),
++ ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}', '\u{1ee47}'),
++ ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}', '\u{1ee4f}'),
++ ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}', '\u{1ee57}'),
++ ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}', '\u{1ee5d}'),
++ ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}', '\u{1ee64}'),
++ ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}', '\u{1ee77}'),
++ ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}', '\u{1ee89}'),
++ ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}', '\u{1eea9}'),
++ ('\u{1eeab}', '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}', '\u{2b734}'),
++ ('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), ('\u{2f800}', '\u{2fa1d}'),
++ ('\u{e0100}', '\u{e01ef}')
++ ];
++
++ pub fn XID_Continue(c: char) -> bool {
++ super::bsearch_range_table(c, XID_Continue_table)
++ }
++
++ pub const XID_Start_table: &'static [(char, char)] = &[
++ ('\u{41}', '\u{5a}'), ('\u{61}', '\u{7a}'), ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'),
++ ('\u{ba}', '\u{ba}'), ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'),
++ ('\u{2c6}', '\u{2d1}'), ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}',
++ '\u{2ee}'), ('\u{370}', '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'),
++ ('\u{37f}', '\u{37f}'), ('\u{386}', '\u{386}'), ('\u{388}', '\u{38a}'), ('\u{38c}',
++ '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}', '\u{3f5}'), ('\u{3f7}', '\u{481}'),
++ ('\u{48a}', '\u{52f}'), ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{561}',
++ '\u{587}'), ('\u{5d0}', '\u{5ea}'), ('\u{5f0}', '\u{5f2}'), ('\u{620}', '\u{64a}'),
++ ('\u{66e}', '\u{66f}'), ('\u{671}', '\u{6d3}'), ('\u{6d5}', '\u{6d5}'), ('\u{6e5}',
++ '\u{6e6}'), ('\u{6ee}', '\u{6ef}'), ('\u{6fa}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'),
++ ('\u{710}', '\u{710}'), ('\u{712}', '\u{72f}'), ('\u{74d}', '\u{7a5}'), ('\u{7b1}',
++ '\u{7b1}'), ('\u{7ca}', '\u{7ea}'), ('\u{7f4}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'),
++ ('\u{800}', '\u{815}'), ('\u{81a}', '\u{81a}'), ('\u{824}', '\u{824}'), ('\u{828}',
++ '\u{828}'), ('\u{840}', '\u{858}'), ('\u{8a0}', '\u{8b4}'), ('\u{8b6}', '\u{8bd}'),
++ ('\u{904}', '\u{939}'), ('\u{93d}', '\u{93d}'), ('\u{950}', '\u{950}'), ('\u{958}',
++ '\u{961}'), ('\u{971}', '\u{980}'), ('\u{985}', '\u{98c}'), ('\u{98f}', '\u{990}'),
++ ('\u{993}', '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), ('\u{9b2}', '\u{9b2}'), ('\u{9b6}',
++ '\u{9b9}'), ('\u{9bd}', '\u{9bd}'), ('\u{9ce}', '\u{9ce}'), ('\u{9dc}', '\u{9dd}'),
++ ('\u{9df}', '\u{9e1}'), ('\u{9f0}', '\u{9f1}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}',
++ '\u{a10}'), ('\u{a13}', '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'),
++ ('\u{a35}', '\u{a36}'), ('\u{a38}', '\u{a39}'), ('\u{a59}', '\u{a5c}'), ('\u{a5e}',
++ '\u{a5e}'), ('\u{a72}', '\u{a74}'), ('\u{a85}', '\u{a8d}'), ('\u{a8f}', '\u{a91}'),
++ ('\u{a93}', '\u{aa8}'), ('\u{aaa}', '\u{ab0}'), ('\u{ab2}', '\u{ab3}'), ('\u{ab5}',
++ '\u{ab9}'), ('\u{abd}', '\u{abd}'), ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', '\u{ae1}'),
++ ('\u{af9}', '\u{af9}'), ('\u{b05}', '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}',
++ '\u{b28}'), ('\u{b2a}', '\u{b30}'), ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'),
++ ('\u{b3d}', '\u{b3d}'), ('\u{b5c}', '\u{b5d}'), ('\u{b5f}', '\u{b61}'), ('\u{b71}',
++ '\u{b71}'), ('\u{b83}', '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'),
++ ('\u{b92}', '\u{b95}'), ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}',
++ '\u{b9f}'), ('\u{ba3}', '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'),
++ ('\u{bd0}', '\u{bd0}'), ('\u{c05}', '\u{c0c}'), ('\u{c0e}', '\u{c10}'), ('\u{c12}',
++ '\u{c28}'), ('\u{c2a}', '\u{c39}'), ('\u{c3d}', '\u{c3d}'), ('\u{c58}', '\u{c5a}'),
++ ('\u{c60}', '\u{c61}'), ('\u{c80}', '\u{c80}'), ('\u{c85}', '\u{c8c}'), ('\u{c8e}',
++ '\u{c90}'), ('\u{c92}', '\u{ca8}'), ('\u{caa}', '\u{cb3}'), ('\u{cb5}', '\u{cb9}'),
++ ('\u{cbd}', '\u{cbd}'), ('\u{cde}', '\u{cde}'), ('\u{ce0}', '\u{ce1}'), ('\u{cf1}',
++ '\u{cf2}'), ('\u{d05}', '\u{d0c}'), ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d3a}'),
++ ('\u{d3d}', '\u{d3d}'), ('\u{d4e}', '\u{d4e}'), ('\u{d54}', '\u{d56}'), ('\u{d5f}',
++ '\u{d61}'), ('\u{d7a}', '\u{d7f}'), ('\u{d85}', '\u{d96}'), ('\u{d9a}', '\u{db1}'),
++ ('\u{db3}', '\u{dbb}'), ('\u{dbd}', '\u{dbd}'), ('\u{dc0}', '\u{dc6}'), ('\u{e01}',
++ '\u{e30}'), ('\u{e32}', '\u{e32}'), ('\u{e40}', '\u{e46}'), ('\u{e81}', '\u{e82}'),
++ ('\u{e84}', '\u{e84}'), ('\u{e87}', '\u{e88}'), ('\u{e8a}', '\u{e8a}'), ('\u{e8d}',
++ '\u{e8d}'), ('\u{e94}', '\u{e97}'), ('\u{e99}', '\u{e9f}'), ('\u{ea1}', '\u{ea3}'),
++ ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', '\u{ea7}'), ('\u{eaa}', '\u{eab}'), ('\u{ead}',
++ '\u{eb0}'), ('\u{eb2}', '\u{eb2}'), ('\u{ebd}', '\u{ebd}'), ('\u{ec0}', '\u{ec4}'),
++ ('\u{ec6}', '\u{ec6}'), ('\u{edc}', '\u{edf}'), ('\u{f00}', '\u{f00}'), ('\u{f40}',
++ '\u{f47}'), ('\u{f49}', '\u{f6c}'), ('\u{f88}', '\u{f8c}'), ('\u{1000}', '\u{102a}'),
++ ('\u{103f}', '\u{103f}'), ('\u{1050}', '\u{1055}'), ('\u{105a}', '\u{105d}'), ('\u{1061}',
++ '\u{1061}'), ('\u{1065}', '\u{1066}'), ('\u{106e}', '\u{1070}'), ('\u{1075}', '\u{1081}'),
++ ('\u{108e}', '\u{108e}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), ('\u{10cd}',
++ '\u{10cd}'), ('\u{10d0}', '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}', '\u{124d}'),
++ ('\u{1250}', '\u{1256}'), ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'), ('\u{1260}',
++ '\u{1288}'), ('\u{128a}', '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}', '\u{12b5}'),
++ ('\u{12b8}', '\u{12be}'), ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), ('\u{12c8}',
++ '\u{12d6}'), ('\u{12d8}', '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}', '\u{135a}'),
++ ('\u{1380}', '\u{138f}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), ('\u{1401}',
++ '\u{166c}'), ('\u{166f}', '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', '\u{16ea}'),
++ ('\u{16ee}', '\u{16f8}'), ('\u{1700}', '\u{170c}'), ('\u{170e}', '\u{1711}'), ('\u{1720}',
++ '\u{1731}'), ('\u{1740}', '\u{1751}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', '\u{1770}'),
++ ('\u{1780}', '\u{17b3}'), ('\u{17d7}', '\u{17d7}'), ('\u{17dc}', '\u{17dc}'), ('\u{1820}',
++ '\u{1877}'), ('\u{1880}', '\u{18a8}'), ('\u{18aa}', '\u{18aa}'), ('\u{18b0}', '\u{18f5}'),
++ ('\u{1900}', '\u{191e}'), ('\u{1950}', '\u{196d}'), ('\u{1970}', '\u{1974}'), ('\u{1980}',
++ '\u{19ab}'), ('\u{19b0}', '\u{19c9}'), ('\u{1a00}', '\u{1a16}'), ('\u{1a20}', '\u{1a54}'),
++ ('\u{1aa7}', '\u{1aa7}'), ('\u{1b05}', '\u{1b33}'), ('\u{1b45}', '\u{1b4b}'), ('\u{1b83}',
++ '\u{1ba0}'), ('\u{1bae}', '\u{1baf}'), ('\u{1bba}', '\u{1be5}'), ('\u{1c00}', '\u{1c23}'),
++ ('\u{1c4d}', '\u{1c4f}'), ('\u{1c5a}', '\u{1c7d}'), ('\u{1c80}', '\u{1c88}'), ('\u{1ce9}',
++ '\u{1cec}'), ('\u{1cee}', '\u{1cf1}'), ('\u{1cf5}', '\u{1cf6}'), ('\u{1d00}', '\u{1dbf}'),
++ ('\u{1e00}', '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', '\u{1f45}'), ('\u{1f48}',
++ '\u{1f4d}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}', '\u{1f5b}'),
++ ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', '\u{1fb4}'), ('\u{1fb6}',
++ '\u{1fbc}'), ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}', '\u{1fcc}'),
++ ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', '\u{1fec}'), ('\u{1ff2}',
++ '\u{1ff4}'), ('\u{1ff6}', '\u{1ffc}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'),
++ ('\u{2090}', '\u{209c}'), ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}',
++ '\u{2113}'), ('\u{2115}', '\u{2115}'), ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'),
++ ('\u{2126}', '\u{2126}'), ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}',
++ '\u{213f}'), ('\u{2145}', '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'),
++ ('\u{2c00}', '\u{2c2e}'), ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}',
++ '\u{2cee}'), ('\u{2cf2}', '\u{2cf3}'), ('\u{2d00}', '\u{2d25}'), ('\u{2d27}', '\u{2d27}'),
++ ('\u{2d2d}', '\u{2d2d}'), ('\u{2d30}', '\u{2d67}'), ('\u{2d6f}', '\u{2d6f}'), ('\u{2d80}',
++ '\u{2d96}'), ('\u{2da0}', '\u{2da6}'), ('\u{2da8}', '\u{2dae}'), ('\u{2db0}', '\u{2db6}'),
++ ('\u{2db8}', '\u{2dbe}'), ('\u{2dc0}', '\u{2dc6}'), ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}',
++ '\u{2dd6}'), ('\u{2dd8}', '\u{2dde}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{3029}'),
++ ('\u{3031}', '\u{3035}'), ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{309d}',
++ '\u{309f}'), ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312d}'),
++ ('\u{3131}', '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}',
++ '\u{4db5}'), ('\u{4e00}', '\u{9fd5}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'),
++ ('\u{a500}', '\u{a60c}'), ('\u{a610}', '\u{a61f}'), ('\u{a62a}', '\u{a62b}'), ('\u{a640}',
++ '\u{a66e}'), ('\u{a67f}', '\u{a69d}'), ('\u{a6a0}', '\u{a6ef}'), ('\u{a717}', '\u{a71f}'),
++ ('\u{a722}', '\u{a788}'), ('\u{a78b}', '\u{a7ae}'), ('\u{a7b0}', '\u{a7b7}'), ('\u{a7f7}',
++ '\u{a801}'), ('\u{a803}', '\u{a805}'), ('\u{a807}', '\u{a80a}'), ('\u{a80c}', '\u{a822}'),
++ ('\u{a840}', '\u{a873}'), ('\u{a882}', '\u{a8b3}'), ('\u{a8f2}', '\u{a8f7}'), ('\u{a8fb}',
++ '\u{a8fb}'), ('\u{a8fd}', '\u{a8fd}'), ('\u{a90a}', '\u{a925}'), ('\u{a930}', '\u{a946}'),
++ ('\u{a960}', '\u{a97c}'), ('\u{a984}', '\u{a9b2}'), ('\u{a9cf}', '\u{a9cf}'), ('\u{a9e0}',
++ '\u{a9e4}'), ('\u{a9e6}', '\u{a9ef}'), ('\u{a9fa}', '\u{a9fe}'), ('\u{aa00}', '\u{aa28}'),
++ ('\u{aa40}', '\u{aa42}'), ('\u{aa44}', '\u{aa4b}'), ('\u{aa60}', '\u{aa76}'), ('\u{aa7a}',
++ '\u{aa7a}'), ('\u{aa7e}', '\u{aaaf}'), ('\u{aab1}', '\u{aab1}'), ('\u{aab5}', '\u{aab6}'),
++ ('\u{aab9}', '\u{aabd}'), ('\u{aac0}', '\u{aac0}'), ('\u{aac2}', '\u{aac2}'), ('\u{aadb}',
++ '\u{aadd}'), ('\u{aae0}', '\u{aaea}'), ('\u{aaf2}', '\u{aaf4}'), ('\u{ab01}', '\u{ab06}'),
++ ('\u{ab09}', '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), ('\u{ab28}',
++ '\u{ab2e}'), ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', '\u{ab65}'), ('\u{ab70}', '\u{abe2}'),
++ ('\u{ac00}', '\u{d7a3}'), ('\u{d7b0}', '\u{d7c6}'), ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}',
++ '\u{fa6d}'), ('\u{fa70}', '\u{fad9}'), ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'),
++ ('\u{fb1d}', '\u{fb1d}'), ('\u{fb1f}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'), ('\u{fb38}',
++ '\u{fb3c}'), ('\u{fb3e}', '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}', '\u{fb44}'),
++ ('\u{fb46}', '\u{fbb1}'), ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'), ('\u{fd50}',
++ '\u{fd8f}'), ('\u{fd92}', '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe71}', '\u{fe71}'),
++ ('\u{fe73}', '\u{fe73}'), ('\u{fe77}', '\u{fe77}'), ('\u{fe79}', '\u{fe79}'), ('\u{fe7b}',
++ '\u{fe7b}'), ('\u{fe7d}', '\u{fe7d}'), ('\u{fe7f}', '\u{fefc}'), ('\u{ff21}', '\u{ff3a}'),
++ ('\u{ff41}', '\u{ff5a}'), ('\u{ff66}', '\u{ff9d}'), ('\u{ffa0}', '\u{ffbe}'), ('\u{ffc2}',
++ '\u{ffc7}'), ('\u{ffca}', '\u{ffcf}'), ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'),
++ ('\u{10000}', '\u{1000b}'), ('\u{1000d}', '\u{10026}'), ('\u{10028}', '\u{1003a}'),
++ ('\u{1003c}', '\u{1003d}'), ('\u{1003f}', '\u{1004d}'), ('\u{10050}', '\u{1005d}'),
++ ('\u{10080}', '\u{100fa}'), ('\u{10140}', '\u{10174}'), ('\u{10280}', '\u{1029c}'),
++ ('\u{102a0}', '\u{102d0}'), ('\u{10300}', '\u{1031f}'), ('\u{10330}', '\u{1034a}'),
++ ('\u{10350}', '\u{10375}'), ('\u{10380}', '\u{1039d}'), ('\u{103a0}', '\u{103c3}'),
++ ('\u{103c8}', '\u{103cf}'), ('\u{103d1}', '\u{103d5}'), ('\u{10400}', '\u{1049d}'),
++ ('\u{104b0}', '\u{104d3}'), ('\u{104d8}', '\u{104fb}'), ('\u{10500}', '\u{10527}'),
++ ('\u{10530}', '\u{10563}'), ('\u{10600}', '\u{10736}'), ('\u{10740}', '\u{10755}'),
++ ('\u{10760}', '\u{10767}'), ('\u{10800}', '\u{10805}'), ('\u{10808}', '\u{10808}'),
++ ('\u{1080a}', '\u{10835}'), ('\u{10837}', '\u{10838}'), ('\u{1083c}', '\u{1083c}'),
++ ('\u{1083f}', '\u{10855}'), ('\u{10860}', '\u{10876}'), ('\u{10880}', '\u{1089e}'),
++ ('\u{108e0}', '\u{108f2}'), ('\u{108f4}', '\u{108f5}'), ('\u{10900}', '\u{10915}'),
++ ('\u{10920}', '\u{10939}'), ('\u{10980}', '\u{109b7}'), ('\u{109be}', '\u{109bf}'),
++ ('\u{10a00}', '\u{10a00}'), ('\u{10a10}', '\u{10a13}'), ('\u{10a15}', '\u{10a17}'),
++ ('\u{10a19}', '\u{10a33}'), ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}', '\u{10a9c}'),
++ ('\u{10ac0}', '\u{10ac7}'), ('\u{10ac9}', '\u{10ae4}'), ('\u{10b00}', '\u{10b35}'),
++ ('\u{10b40}', '\u{10b55}'), ('\u{10b60}', '\u{10b72}'), ('\u{10b80}', '\u{10b91}'),
++ ('\u{10c00}', '\u{10c48}'), ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'),
++ ('\u{11003}', '\u{11037}'), ('\u{11083}', '\u{110af}'), ('\u{110d0}', '\u{110e8}'),
++ ('\u{11103}', '\u{11126}'), ('\u{11150}', '\u{11172}'), ('\u{11176}', '\u{11176}'),
++ ('\u{11183}', '\u{111b2}'), ('\u{111c1}', '\u{111c4}'), ('\u{111da}', '\u{111da}'),
++ ('\u{111dc}', '\u{111dc}'), ('\u{11200}', '\u{11211}'), ('\u{11213}', '\u{1122b}'),
++ ('\u{11280}', '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'),
++ ('\u{1128f}', '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112de}'),
++ ('\u{11305}', '\u{1130c}'), ('\u{1130f}', '\u{11310}'), ('\u{11313}', '\u{11328}'),
++ ('\u{1132a}', '\u{11330}'), ('\u{11332}', '\u{11333}'), ('\u{11335}', '\u{11339}'),
++ ('\u{1133d}', '\u{1133d}'), ('\u{11350}', '\u{11350}'), ('\u{1135d}', '\u{11361}'),
++ ('\u{11400}', '\u{11434}'), ('\u{11447}', '\u{1144a}'), ('\u{11480}', '\u{114af}'),
++ ('\u{114c4}', '\u{114c5}'), ('\u{114c7}', '\u{114c7}'), ('\u{11580}', '\u{115ae}'),
++ ('\u{115d8}', '\u{115db}'), ('\u{11600}', '\u{1162f}'), ('\u{11644}', '\u{11644}'),
++ ('\u{11680}', '\u{116aa}'), ('\u{11700}', '\u{11719}'), ('\u{118a0}', '\u{118df}'),
++ ('\u{118ff}', '\u{118ff}'), ('\u{11ac0}', '\u{11af8}'), ('\u{11c00}', '\u{11c08}'),
++ ('\u{11c0a}', '\u{11c2e}'), ('\u{11c40}', '\u{11c40}'), ('\u{11c72}', '\u{11c8f}'),
++ ('\u{12000}', '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', '\u{12543}'),
++ ('\u{13000}', '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', '\u{16a38}'),
++ ('\u{16a40}', '\u{16a5e}'), ('\u{16ad0}', '\u{16aed}'), ('\u{16b00}', '\u{16b2f}'),
++ ('\u{16b40}', '\u{16b43}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', '\u{16b8f}'),
++ ('\u{16f00}', '\u{16f44}'), ('\u{16f50}', '\u{16f50}'), ('\u{16f93}', '\u{16f9f}'),
++ ('\u{16fe0}', '\u{16fe0}'), ('\u{17000}', '\u{187ec}'), ('\u{18800}', '\u{18af2}'),
++ ('\u{1b000}', '\u{1b001}'), ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'),
++ ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}', '\u{1bc99}'), ('\u{1d400}', '\u{1d454}'),
++ ('\u{1d456}', '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'), ('\u{1d4a2}', '\u{1d4a2}'),
++ ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'), ('\u{1d4ae}', '\u{1d4b9}'),
++ ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'), ('\u{1d4c5}', '\u{1d505}'),
++ ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'), ('\u{1d516}', '\u{1d51c}'),
++ ('\u{1d51e}', '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'), ('\u{1d540}', '\u{1d544}'),
++ ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'), ('\u{1d552}', '\u{1d6a5}'),
++ ('\u{1d6a8}', '\u{1d6c0}'), ('\u{1d6c2}', '\u{1d6da}'), ('\u{1d6dc}', '\u{1d6fa}'),
++ ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', '\u{1d734}'), ('\u{1d736}', '\u{1d74e}'),
++ ('\u{1d750}', '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'), ('\u{1d78a}', '\u{1d7a8}'),
++ ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'), ('\u{1e800}', '\u{1e8c4}'),
++ ('\u{1e900}', '\u{1e943}'), ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}', '\u{1ee1f}'),
++ ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}', '\u{1ee27}'),
++ ('\u{1ee29}', '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}', '\u{1ee39}'),
++ ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}', '\u{1ee47}'),
++ ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}', '\u{1ee4f}'),
++ ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}', '\u{1ee57}'),
++ ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}', '\u{1ee5d}'),
++ ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}', '\u{1ee64}'),
++ ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}', '\u{1ee77}'),
++ ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}', '\u{1ee89}'),
++ ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}', '\u{1eea9}'),
++ ('\u{1eeab}', '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}', '\u{2b734}'),
++ ('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), ('\u{2f800}', '\u{2fa1d}')
++ ];
++
++ pub fn XID_Start(c: char) -> bool {
++ super::bsearch_range_table(c, XID_Start_table)
++ }
++
++}
++
+diff --git a/third_party/rust/unicode-xid-0.1.0/src/tests.rs b/third_party/rust/unicode-xid-0.1.0/src/tests.rs
+new file mode 100644
+--- /dev/null
++++ b/third_party/rust/unicode-xid-0.1.0/src/tests.rs
+@@ -0,0 +1,113 @@
++// Copyright 2012-2015 The Rust Project Developers. See the COPYRIGHT
++// file at the top-level directory of this distribution and at
++// http://rust-lang.org/COPYRIGHT.
++//
++// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
++// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
++// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
++// option. This file may not be copied, modified, or distributed
++// except according to those terms.
++
++#[cfg(feature = "bench")]
++use std::iter;
++#[cfg(feature = "bench")]
++use test::Bencher;
++#[cfg(feature = "bench")]
++use std::prelude::v1::*;
++
++use super::UnicodeXID;
++
++#[cfg(feature = "bench")]
++#[bench]
++fn cargo_is_xid_start(b: &mut Bencher) {
++ let string = iter::repeat('a').take(4096).collect::<String>();
++
++ b.bytes = string.len() as u64;
++ b.iter(|| {
++ string.chars().all(UnicodeXID::is_xid_start)
++ });
++}
++
++#[cfg(feature = "bench")]
++#[bench]
++fn stdlib_is_xid_start(b: &mut Bencher) {
++ let string = iter::repeat('a').take(4096).collect::<String>();
++
++ b.bytes = string.len() as u64;
++ b.iter(|| {
++ string.chars().all(char::is_xid_start)
++ });
++}
++
++#[cfg(feature = "bench")]
++#[bench]
++fn cargo_xid_continue(b: &mut Bencher) {
++ let string = iter::repeat('a').take(4096).collect::<String>();
++
++ b.bytes = string.len() as u64;
++ b.iter(|| {
++ string.chars().all(UnicodeXID::is_xid_continue)
++ });
++}
++
++#[cfg(feature = "bench")]
++#[bench]
++fn stdlib_xid_continue(b: &mut Bencher) {
++ let string = iter::repeat('a').take(4096).collect::<String>();
++
++ b.bytes = string.len() as u64;
++ b.iter(|| {
++ string.chars().all(char::is_xid_continue)
++ });
++}
++
++#[test]
++fn test_is_xid_start() {
++ let chars = [
++ 'A', 'Z', 'a', 'z',
++ '\u{1000d}', '\u{10026}',
++ ];
++
++ for ch in &chars {
++ assert!(UnicodeXID::is_xid_start(*ch), "{}", ch);
++ }
++}
++
++#[test]
++fn test_is_not_xid_start() {
++ let chars = [
++ '\x00', '\x01',
++ '0', '9',
++ ' ', '[', '<', '{', '(',
++ '\u{02c2}', '\u{ffff}',
++ ];
++
++ for ch in &chars {
++ assert!(!UnicodeXID::is_xid_start(*ch), "{}", ch);
++ }
++}
++
++#[test]
++fn test_is_xid_continue() {
++ let chars = [
++ '0', '9', 'A', 'Z', 'a', 'z', '_',
++ '\u{1000d}', '\u{10026}',
++ ];
++
++ for ch in &chars {
++ assert!(UnicodeXID::is_xid_continue(*ch), "{}", ch);
++ }
++}
++
++#[test]
++fn test_is_not_xid_continue() {
++ let chars = [
++ '\x00', '\x01',
++ ' ', '[', '<', '{', '(',
++ '\u{02c2}', '\u{ffff}',
++ ];
++
++ for &ch in &chars {
++ assert!(!UnicodeXID::is_xid_continue(ch), "{}", ch);
++ }
++}
+diff --git a/third_party/rust/unicode-xid/.cargo-checksum.json b/third_party/rust/unicode-xid/.cargo-checksum.json
+--- a/third_party/rust/unicode-xid/.cargo-checksum.json
++++ b/third_party/rust/unicode-xid/.cargo-checksum.json
+@@ -1 +1 @@
+-{"files":{"COPYRIGHT":"23860c2a7b5d96b21569afedf033469bab9fe14a1b24a35068b8641c578ce24d","Cargo.toml":"aafcae4002bee71546a6aa40a97b9124a69f169ee7e3a9e3262338e32b4c2b9b","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"67998486b32f4fe46abbbaa411b92528750e7f0e22452dc8a5b95d87d80fde75","scripts/unicode.py":"762eea92dd51238c6bf877570bde1149932ba15cf87be1618fc21cd53e941733","src/lib.rs":"4a89fadf452ae7c53536eaa4496f951a3153f8189dd1cbc532648731d30f0b11","src/tables.rs":"0643459b6ebeeed83aecd7604f0ea29c06bea7ce6c1cd9acd4988d27ace1ec53","src/tests.rs":"35a459382e190197e7b9a78832ae79f310b48a02a5b4227bf9bbc89d46c8deac"},"package":"fc72304796d0818e357ead4e000d19c9c174ab23dc11093ac919054d20a6a7fc"}
+\ No newline at end of file
++{"files":{"COPYRIGHT":"23860c2a7b5d96b21569afedf033469bab9fe14a1b24a35068b8641c578ce24d","Cargo.toml":"d675b1531f28cec902162b875d7718cbbacdbb97d60a043940d08d2368e660f3","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"7b63ecd5f1902af1b63729947373683c32745c16a10e8e6292e2e2dcd7e90ae0","README.md":"c9874f691222e560b8a468adf8c471d30a6efe8d02d6f3457dcbc4f95ac1e978","src/lib.rs":"4038be7839c000997565e21eaebf20842d389a64b057fcd519b674c03c466f77","src/tables.rs":"40f3711b453ea58a02e69a37f66ebe4fcb49d2cc4da5b013c732539cfceee48c","src/tests.rs":"c2bb7412b6cee1a330c85f3d60eb77e98162f98f268b6fe9f748d8910f78a026"},"package":"826e7639553986605ec5979c7dd957c7895e93eabed50ab2ffa7f6128a75097c"}
+\ No newline at end of file
+diff --git a/third_party/rust/unicode-xid/Cargo.toml b/third_party/rust/unicode-xid/Cargo.toml
+--- a/third_party/rust/unicode-xid/Cargo.toml
++++ b/third_party/rust/unicode-xid/Cargo.toml
+@@ -1,26 +1,31 @@
++# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
++#
++# When uploading crates to the registry Cargo will automatically
++# "normalize" Cargo.toml files for maximal compatibility
++# with all versions of Cargo and also rewrite `path` dependencies
++# to registry (e.g., crates.io) dependencies
++#
++# If you believe there's an error in this file please file an
++# issue against the rust-lang/cargo repository. If you're
++# editing this file be aware that the upstream Cargo.toml
++# will likely look very different (and much more reasonable)
++
+ [package]
+-
+ name = "unicode-xid"
+-version = "0.1.0"
+-authors = ["erick.tryzelaar <erick.tryzelaar@gmail.com>",
+- "kwantam <kwantam@gmail.com>",
+- ]
+-
++version = "0.2.0"
++authors = ["erick.tryzelaar <erick.tryzelaar@gmail.com>", "kwantam <kwantam@gmail.com>"]
++exclude = ["/scripts/*", "/.travis.yml"]
++description = "Determine whether characters have the XID_Start\nor XID_Continue properties according to\nUnicode Standard Annex #31.\n"
+ homepage = "https://github.com/unicode-rs/unicode-xid"
+-repository = "https://github.com/unicode-rs/unicode-xid"
+ documentation = "https://unicode-rs.github.io/unicode-xid"
+-license = "MIT/Apache-2.0"
++readme = "README.md"
+ keywords = ["text", "unicode", "xid"]
+-readme = "README.md"
+-description = """
+-Determine whether characters have the XID_Start
+-or XID_Continue properties according to
+-Unicode Standard Annex #31.
+-"""
+-
+-exclude = [ "target/*", "Cargo.lock" ]
++license = "MIT OR Apache-2.0"
++repository = "https://github.com/unicode-rs/unicode-xid"
+
+ [features]
++bench = []
+ default = []
+ no_std = []
+-bench = []
++[badges.travis-ci]
++repository = "unicode-rs/unicode-xid"
+diff --git a/third_party/rust/unicode-xid/README.md b/third_party/rust/unicode-xid/README.md
+--- a/third_party/rust/unicode-xid/README.md
++++ b/third_party/rust/unicode-xid/README.md
+@@ -30,5 +30,15 @@
+
+ ```toml
+ [dependencies]
+-unicode-xid = "0.0.4"
++unicode-xid = "0.1.0"
+ ```
++
++# changelog
++
++## 0.2.0
++
++- Update to Unicode 12.1.0.
++
++## 0.1.0
++
++- Initial release.
+diff --git a/third_party/rust/unicode-xid/src/lib.rs b/third_party/rust/unicode-xid/src/lib.rs
+--- a/third_party/rust/unicode-xid/src/lib.rs
++++ b/third_party/rust/unicode-xid/src/lib.rs
+@@ -42,7 +42,7 @@
+ html_favicon_url = "https://unicode-rs.github.io/unicode-rs_sm.png")]
+
+ #![no_std]
+-#![cfg_attr(feature = "bench", feature(test, unicode))]
++#![cfg_attr(feature = "bench", feature(test, unicode_internals))]
+
+ #[cfg(test)]
+ #[macro_use]
+diff --git a/third_party/rust/unicode-xid/src/tables.rs b/third_party/rust/unicode-xid/src/tables.rs
+--- a/third_party/rust/unicode-xid/src/tables.rs
++++ b/third_party/rust/unicode-xid/src/tables.rs
+@@ -14,9 +14,9 @@
+
+ /// The version of [Unicode](http://www.unicode.org/)
+ /// that this version of unicode-xid is based on.
+-pub const UNICODE_VERSION: (u64, u64, u64) = (9, 0, 0);
++pub const UNICODE_VERSION: (u64, u64, u64) = (12, 1, 0);
+
+-fn bsearch_range_table(c: char, r: &'static [(char,char)]) -> bool {
++fn bsearch_range_table(c: char, r: &[(char,char)]) -> bool {
+ use core::cmp::Ordering::{Equal, Less, Greater};
+
+ r.binary_search_by(|&(lo,hi)| {
+@@ -27,7 +27,7 @@
+ }
+
+ pub mod derived_property {
+- pub const XID_Continue_table: &'static [(char, char)] = &[
++ pub const XID_Continue_table: &[(char, char)] = &[
+ ('\u{30}', '\u{39}'), ('\u{41}', '\u{5a}'), ('\u{5f}', '\u{5f}'), ('\u{61}', '\u{7a}'),
+ ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'), ('\u{b7}', '\u{b7}'), ('\u{ba}', '\u{ba}'),
+ ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'), ('\u{2c6}', '\u{2d1}'),
+@@ -35,387 +35,412 @@
+ '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'), ('\u{37f}', '\u{37f}'),
+ ('\u{386}', '\u{38a}'), ('\u{38c}', '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}',
+ '\u{3f5}'), ('\u{3f7}', '\u{481}'), ('\u{483}', '\u{487}'), ('\u{48a}', '\u{52f}'),
+- ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{561}', '\u{587}'), ('\u{591}',
++ ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{560}', '\u{588}'), ('\u{591}',
+ '\u{5bd}'), ('\u{5bf}', '\u{5bf}'), ('\u{5c1}', '\u{5c2}'), ('\u{5c4}', '\u{5c5}'),
+- ('\u{5c7}', '\u{5c7}'), ('\u{5d0}', '\u{5ea}'), ('\u{5f0}', '\u{5f2}'), ('\u{610}',
++ ('\u{5c7}', '\u{5c7}'), ('\u{5d0}', '\u{5ea}'), ('\u{5ef}', '\u{5f2}'), ('\u{610}',
+ '\u{61a}'), ('\u{620}', '\u{669}'), ('\u{66e}', '\u{6d3}'), ('\u{6d5}', '\u{6dc}'),
+ ('\u{6df}', '\u{6e8}'), ('\u{6ea}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'), ('\u{710}',
+ '\u{74a}'), ('\u{74d}', '\u{7b1}'), ('\u{7c0}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'),
+- ('\u{800}', '\u{82d}'), ('\u{840}', '\u{85b}'), ('\u{8a0}', '\u{8b4}'), ('\u{8b6}',
+- '\u{8bd}'), ('\u{8d4}', '\u{8e1}'), ('\u{8e3}', '\u{963}'), ('\u{966}', '\u{96f}'),
+- ('\u{971}', '\u{983}'), ('\u{985}', '\u{98c}'), ('\u{98f}', '\u{990}'), ('\u{993}',
+- '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), ('\u{9b2}', '\u{9b2}'), ('\u{9b6}', '\u{9b9}'),
+- ('\u{9bc}', '\u{9c4}'), ('\u{9c7}', '\u{9c8}'), ('\u{9cb}', '\u{9ce}'), ('\u{9d7}',
+- '\u{9d7}'), ('\u{9dc}', '\u{9dd}'), ('\u{9df}', '\u{9e3}'), ('\u{9e6}', '\u{9f1}'),
+- ('\u{a01}', '\u{a03}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}', '\u{a10}'), ('\u{a13}',
+- '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'), ('\u{a35}', '\u{a36}'),
+- ('\u{a38}', '\u{a39}'), ('\u{a3c}', '\u{a3c}'), ('\u{a3e}', '\u{a42}'), ('\u{a47}',
+- '\u{a48}'), ('\u{a4b}', '\u{a4d}'), ('\u{a51}', '\u{a51}'), ('\u{a59}', '\u{a5c}'),
+- ('\u{a5e}', '\u{a5e}'), ('\u{a66}', '\u{a75}'), ('\u{a81}', '\u{a83}'), ('\u{a85}',
+- '\u{a8d}'), ('\u{a8f}', '\u{a91}'), ('\u{a93}', '\u{aa8}'), ('\u{aaa}', '\u{ab0}'),
+- ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', '\u{ab9}'), ('\u{abc}', '\u{ac5}'), ('\u{ac7}',
+- '\u{ac9}'), ('\u{acb}', '\u{acd}'), ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', '\u{ae3}'),
+- ('\u{ae6}', '\u{aef}'), ('\u{af9}', '\u{af9}'), ('\u{b01}', '\u{b03}'), ('\u{b05}',
++ ('\u{7fd}', '\u{7fd}'), ('\u{800}', '\u{82d}'), ('\u{840}', '\u{85b}'), ('\u{860}',
++ '\u{86a}'), ('\u{8a0}', '\u{8b4}'), ('\u{8b6}', '\u{8bd}'), ('\u{8d3}', '\u{8e1}'),
++ ('\u{8e3}', '\u{963}'), ('\u{966}', '\u{96f}'), ('\u{971}', '\u{983}'), ('\u{985}',
++ '\u{98c}'), ('\u{98f}', '\u{990}'), ('\u{993}', '\u{9a8}'), ('\u{9aa}', '\u{9b0}'),
++ ('\u{9b2}', '\u{9b2}'), ('\u{9b6}', '\u{9b9}'), ('\u{9bc}', '\u{9c4}'), ('\u{9c7}',
++ '\u{9c8}'), ('\u{9cb}', '\u{9ce}'), ('\u{9d7}', '\u{9d7}'), ('\u{9dc}', '\u{9dd}'),
++ ('\u{9df}', '\u{9e3}'), ('\u{9e6}', '\u{9f1}'), ('\u{9fc}', '\u{9fc}'), ('\u{9fe}',
++ '\u{9fe}'), ('\u{a01}', '\u{a03}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}', '\u{a10}'),
++ ('\u{a13}', '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'), ('\u{a35}',
++ '\u{a36}'), ('\u{a38}', '\u{a39}'), ('\u{a3c}', '\u{a3c}'), ('\u{a3e}', '\u{a42}'),
++ ('\u{a47}', '\u{a48}'), ('\u{a4b}', '\u{a4d}'), ('\u{a51}', '\u{a51}'), ('\u{a59}',
++ '\u{a5c}'), ('\u{a5e}', '\u{a5e}'), ('\u{a66}', '\u{a75}'), ('\u{a81}', '\u{a83}'),
++ ('\u{a85}', '\u{a8d}'), ('\u{a8f}', '\u{a91}'), ('\u{a93}', '\u{aa8}'), ('\u{aaa}',
++ '\u{ab0}'), ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', '\u{ab9}'), ('\u{abc}', '\u{ac5}'),
++ ('\u{ac7}', '\u{ac9}'), ('\u{acb}', '\u{acd}'), ('\u{ad0}', '\u{ad0}'), ('\u{ae0}',
++ '\u{ae3}'), ('\u{ae6}', '\u{aef}'), ('\u{af9}', '\u{aff}'), ('\u{b01}', '\u{b03}'),
++ ('\u{b05}', '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}', '\u{b28}'), ('\u{b2a}',
++ '\u{b30}'), ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'), ('\u{b3c}', '\u{b44}'),
++ ('\u{b47}', '\u{b48}'), ('\u{b4b}', '\u{b4d}'), ('\u{b56}', '\u{b57}'), ('\u{b5c}',
++ '\u{b5d}'), ('\u{b5f}', '\u{b63}'), ('\u{b66}', '\u{b6f}'), ('\u{b71}', '\u{b71}'),
++ ('\u{b82}', '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'), ('\u{b92}',
++ '\u{b95}'), ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}', '\u{b9f}'),
++ ('\u{ba3}', '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'), ('\u{bbe}',
++ '\u{bc2}'), ('\u{bc6}', '\u{bc8}'), ('\u{bca}', '\u{bcd}'), ('\u{bd0}', '\u{bd0}'),
++ ('\u{bd7}', '\u{bd7}'), ('\u{be6}', '\u{bef}'), ('\u{c00}', '\u{c0c}'), ('\u{c0e}',
++ '\u{c10}'), ('\u{c12}', '\u{c28}'), ('\u{c2a}', '\u{c39}'), ('\u{c3d}', '\u{c44}'),
++ ('\u{c46}', '\u{c48}'), ('\u{c4a}', '\u{c4d}'), ('\u{c55}', '\u{c56}'), ('\u{c58}',
++ '\u{c5a}'), ('\u{c60}', '\u{c63}'), ('\u{c66}', '\u{c6f}'), ('\u{c80}', '\u{c83}'),
++ ('\u{c85}', '\u{c8c}'), ('\u{c8e}', '\u{c90}'), ('\u{c92}', '\u{ca8}'), ('\u{caa}',
++ '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), ('\u{cbc}', '\u{cc4}'), ('\u{cc6}', '\u{cc8}'),
++ ('\u{cca}', '\u{ccd}'), ('\u{cd5}', '\u{cd6}'), ('\u{cde}', '\u{cde}'), ('\u{ce0}',
++ '\u{ce3}'), ('\u{ce6}', '\u{cef}'), ('\u{cf1}', '\u{cf2}'), ('\u{d00}', '\u{d03}'),
++ ('\u{d05}', '\u{d0c}'), ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d44}'), ('\u{d46}',
++ '\u{d48}'), ('\u{d4a}', '\u{d4e}'), ('\u{d54}', '\u{d57}'), ('\u{d5f}', '\u{d63}'),
++ ('\u{d66}', '\u{d6f}'), ('\u{d7a}', '\u{d7f}'), ('\u{d82}', '\u{d83}'), ('\u{d85}',
++ '\u{d96}'), ('\u{d9a}', '\u{db1}'), ('\u{db3}', '\u{dbb}'), ('\u{dbd}', '\u{dbd}'),
++ ('\u{dc0}', '\u{dc6}'), ('\u{dca}', '\u{dca}'), ('\u{dcf}', '\u{dd4}'), ('\u{dd6}',
++ '\u{dd6}'), ('\u{dd8}', '\u{ddf}'), ('\u{de6}', '\u{def}'), ('\u{df2}', '\u{df3}'),
++ ('\u{e01}', '\u{e3a}'), ('\u{e40}', '\u{e4e}'), ('\u{e50}', '\u{e59}'), ('\u{e81}',
++ '\u{e82}'), ('\u{e84}', '\u{e84}'), ('\u{e86}', '\u{e8a}'), ('\u{e8c}', '\u{ea3}'),
++ ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', '\u{ebd}'), ('\u{ec0}', '\u{ec4}'), ('\u{ec6}',
++ '\u{ec6}'), ('\u{ec8}', '\u{ecd}'), ('\u{ed0}', '\u{ed9}'), ('\u{edc}', '\u{edf}'),
++ ('\u{f00}', '\u{f00}'), ('\u{f18}', '\u{f19}'), ('\u{f20}', '\u{f29}'), ('\u{f35}',
++ '\u{f35}'), ('\u{f37}', '\u{f37}'), ('\u{f39}', '\u{f39}'), ('\u{f3e}', '\u{f47}'),
++ ('\u{f49}', '\u{f6c}'), ('\u{f71}', '\u{f84}'), ('\u{f86}', '\u{f97}'), ('\u{f99}',
++ '\u{fbc}'), ('\u{fc6}', '\u{fc6}'), ('\u{1000}', '\u{1049}'), ('\u{1050}', '\u{109d}'),
++ ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), ('\u{10cd}', '\u{10cd}'), ('\u{10d0}',
++ '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}', '\u{124d}'), ('\u{1250}', '\u{1256}'),
++ ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'), ('\u{1260}', '\u{1288}'), ('\u{128a}',
++ '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}', '\u{12b5}'), ('\u{12b8}', '\u{12be}'),
++ ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), ('\u{12c8}', '\u{12d6}'), ('\u{12d8}',
++ '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}', '\u{135a}'), ('\u{135d}', '\u{135f}'),
++ ('\u{1369}', '\u{1371}'), ('\u{1380}', '\u{138f}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}',
++ '\u{13fd}'), ('\u{1401}', '\u{166c}'), ('\u{166f}', '\u{167f}'), ('\u{1681}', '\u{169a}'),
++ ('\u{16a0}', '\u{16ea}'), ('\u{16ee}', '\u{16f8}'), ('\u{1700}', '\u{170c}'), ('\u{170e}',
++ '\u{1714}'), ('\u{1720}', '\u{1734}'), ('\u{1740}', '\u{1753}'), ('\u{1760}', '\u{176c}'),
++ ('\u{176e}', '\u{1770}'), ('\u{1772}', '\u{1773}'), ('\u{1780}', '\u{17d3}'), ('\u{17d7}',
++ '\u{17d7}'), ('\u{17dc}', '\u{17dd}'), ('\u{17e0}', '\u{17e9}'), ('\u{180b}', '\u{180d}'),
++ ('\u{1810}', '\u{1819}'), ('\u{1820}', '\u{1878}'), ('\u{1880}', '\u{18aa}'), ('\u{18b0}',
++ '\u{18f5}'), ('\u{1900}', '\u{191e}'), ('\u{1920}', '\u{192b}'), ('\u{1930}', '\u{193b}'),
++ ('\u{1946}', '\u{196d}'), ('\u{1970}', '\u{1974}'), ('\u{1980}', '\u{19ab}'), ('\u{19b0}',
++ '\u{19c9}'), ('\u{19d0}', '\u{19da}'), ('\u{1a00}', '\u{1a1b}'), ('\u{1a20}', '\u{1a5e}'),
++ ('\u{1a60}', '\u{1a7c}'), ('\u{1a7f}', '\u{1a89}'), ('\u{1a90}', '\u{1a99}'), ('\u{1aa7}',
++ '\u{1aa7}'), ('\u{1ab0}', '\u{1abd}'), ('\u{1b00}', '\u{1b4b}'), ('\u{1b50}', '\u{1b59}'),
++ ('\u{1b6b}', '\u{1b73}'), ('\u{1b80}', '\u{1bf3}'), ('\u{1c00}', '\u{1c37}'), ('\u{1c40}',
++ '\u{1c49}'), ('\u{1c4d}', '\u{1c7d}'), ('\u{1c80}', '\u{1c88}'), ('\u{1c90}', '\u{1cba}'),
++ ('\u{1cbd}', '\u{1cbf}'), ('\u{1cd0}', '\u{1cd2}'), ('\u{1cd4}', '\u{1cfa}'), ('\u{1d00}',
++ '\u{1df9}'), ('\u{1dfb}', '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', '\u{1f45}'),
++ ('\u{1f48}', '\u{1f4d}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}',
++ '\u{1f5b}'), ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', '\u{1fb4}'),
++ ('\u{1fb6}', '\u{1fbc}'), ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}',
++ '\u{1fcc}'), ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', '\u{1fec}'),
++ ('\u{1ff2}', '\u{1ff4}'), ('\u{1ff6}', '\u{1ffc}'), ('\u{203f}', '\u{2040}'), ('\u{2054}',
++ '\u{2054}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}', '\u{209c}'),
++ ('\u{20d0}', '\u{20dc}'), ('\u{20e1}', '\u{20e1}'), ('\u{20e5}', '\u{20f0}'), ('\u{2102}',
++ '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}', '\u{2113}'), ('\u{2115}', '\u{2115}'),
++ ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'), ('\u{2126}', '\u{2126}'), ('\u{2128}',
++ '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}', '\u{213f}'), ('\u{2145}', '\u{2149}'),
++ ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'), ('\u{2c00}', '\u{2c2e}'), ('\u{2c30}',
++ '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', '\u{2cf3}'), ('\u{2d00}', '\u{2d25}'),
++ ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'), ('\u{2d30}', '\u{2d67}'), ('\u{2d6f}',
++ '\u{2d6f}'), ('\u{2d7f}', '\u{2d96}'), ('\u{2da0}', '\u{2da6}'), ('\u{2da8}', '\u{2dae}'),
++ ('\u{2db0}', '\u{2db6}'), ('\u{2db8}', '\u{2dbe}'), ('\u{2dc0}', '\u{2dc6}'), ('\u{2dc8}',
++ '\u{2dce}'), ('\u{2dd0}', '\u{2dd6}'), ('\u{2dd8}', '\u{2dde}'), ('\u{2de0}', '\u{2dff}'),
++ ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{302f}'), ('\u{3031}', '\u{3035}'), ('\u{3038}',
++ '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{3099}', '\u{309a}'), ('\u{309d}', '\u{309f}'),
++ ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312f}'), ('\u{3131}',
++ '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}', '\u{4db5}'),
++ ('\u{4e00}', '\u{9fef}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'), ('\u{a500}',
++ '\u{a60c}'), ('\u{a610}', '\u{a62b}'), ('\u{a640}', '\u{a66f}'), ('\u{a674}', '\u{a67d}'),
++ ('\u{a67f}', '\u{a6f1}'), ('\u{a717}', '\u{a71f}'), ('\u{a722}', '\u{a788}'), ('\u{a78b}',
++ '\u{a7bf}'), ('\u{a7c2}', '\u{a7c6}'), ('\u{a7f7}', '\u{a827}'), ('\u{a840}', '\u{a873}'),
++ ('\u{a880}', '\u{a8c5}'), ('\u{a8d0}', '\u{a8d9}'), ('\u{a8e0}', '\u{a8f7}'), ('\u{a8fb}',
++ '\u{a8fb}'), ('\u{a8fd}', '\u{a92d}'), ('\u{a930}', '\u{a953}'), ('\u{a960}', '\u{a97c}'),
++ ('\u{a980}', '\u{a9c0}'), ('\u{a9cf}', '\u{a9d9}'), ('\u{a9e0}', '\u{a9fe}'), ('\u{aa00}',
++ '\u{aa36}'), ('\u{aa40}', '\u{aa4d}'), ('\u{aa50}', '\u{aa59}'), ('\u{aa60}', '\u{aa76}'),
++ ('\u{aa7a}', '\u{aac2}'), ('\u{aadb}', '\u{aadd}'), ('\u{aae0}', '\u{aaef}'), ('\u{aaf2}',
++ '\u{aaf6}'), ('\u{ab01}', '\u{ab06}'), ('\u{ab09}', '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'),
++ ('\u{ab20}', '\u{ab26}'), ('\u{ab28}', '\u{ab2e}'), ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}',
++ '\u{ab67}'), ('\u{ab70}', '\u{abea}'), ('\u{abec}', '\u{abed}'), ('\u{abf0}', '\u{abf9}'),
++ ('\u{ac00}', '\u{d7a3}'), ('\u{d7b0}', '\u{d7c6}'), ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}',
++ '\u{fa6d}'), ('\u{fa70}', '\u{fad9}'), ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'),
++ ('\u{fb1d}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'), ('\u{fb38}', '\u{fb3c}'), ('\u{fb3e}',
++ '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}', '\u{fb44}'), ('\u{fb46}', '\u{fbb1}'),
++ ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'), ('\u{fd50}', '\u{fd8f}'), ('\u{fd92}',
++ '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe00}', '\u{fe0f}'), ('\u{fe20}', '\u{fe2f}'),
++ ('\u{fe33}', '\u{fe34}'), ('\u{fe4d}', '\u{fe4f}'), ('\u{fe71}', '\u{fe71}'), ('\u{fe73}',
++ '\u{fe73}'), ('\u{fe77}', '\u{fe77}'), ('\u{fe79}', '\u{fe79}'), ('\u{fe7b}', '\u{fe7b}'),
++ ('\u{fe7d}', '\u{fe7d}'), ('\u{fe7f}', '\u{fefc}'), ('\u{ff10}', '\u{ff19}'), ('\u{ff21}',
++ '\u{ff3a}'), ('\u{ff3f}', '\u{ff3f}'), ('\u{ff41}', '\u{ff5a}'), ('\u{ff66}', '\u{ffbe}'),
++ ('\u{ffc2}', '\u{ffc7}'), ('\u{ffca}', '\u{ffcf}'), ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}',
++ '\u{ffdc}'), ('\u{10000}', '\u{1000b}'), ('\u{1000d}', '\u{10026}'), ('\u{10028}',
++ '\u{1003a}'), ('\u{1003c}', '\u{1003d}'), ('\u{1003f}', '\u{1004d}'), ('\u{10050}',
++ '\u{1005d}'), ('\u{10080}', '\u{100fa}'), ('\u{10140}', '\u{10174}'), ('\u{101fd}',
++ '\u{101fd}'), ('\u{10280}', '\u{1029c}'), ('\u{102a0}', '\u{102d0}'), ('\u{102e0}',
++ '\u{102e0}'), ('\u{10300}', '\u{1031f}'), ('\u{1032d}', '\u{1034a}'), ('\u{10350}',
++ '\u{1037a}'), ('\u{10380}', '\u{1039d}'), ('\u{103a0}', '\u{103c3}'), ('\u{103c8}',
++ '\u{103cf}'), ('\u{103d1}', '\u{103d5}'), ('\u{10400}', '\u{1049d}'), ('\u{104a0}',
++ '\u{104a9}'), ('\u{104b0}', '\u{104d3}'), ('\u{104d8}', '\u{104fb}'), ('\u{10500}',
++ '\u{10527}'), ('\u{10530}', '\u{10563}'), ('\u{10600}', '\u{10736}'), ('\u{10740}',
++ '\u{10755}'), ('\u{10760}', '\u{10767}'), ('\u{10800}', '\u{10805}'), ('\u{10808}',
++ '\u{10808}'), ('\u{1080a}', '\u{10835}'), ('\u{10837}', '\u{10838}'), ('\u{1083c}',
++ '\u{1083c}'), ('\u{1083f}', '\u{10855}'), ('\u{10860}', '\u{10876}'), ('\u{10880}',
++ '\u{1089e}'), ('\u{108e0}', '\u{108f2}'), ('\u{108f4}', '\u{108f5}'), ('\u{10900}',
++ '\u{10915}'), ('\u{10920}', '\u{10939}'), ('\u{10980}', '\u{109b7}'), ('\u{109be}',
++ '\u{109bf}'), ('\u{10a00}', '\u{10a03}'), ('\u{10a05}', '\u{10a06}'), ('\u{10a0c}',
++ '\u{10a13}'), ('\u{10a15}', '\u{10a17}'), ('\u{10a19}', '\u{10a35}'), ('\u{10a38}',
++ '\u{10a3a}'), ('\u{10a3f}', '\u{10a3f}'), ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}',
++ '\u{10a9c}'), ('\u{10ac0}', '\u{10ac7}'), ('\u{10ac9}', '\u{10ae6}'), ('\u{10b00}',
++ '\u{10b35}'), ('\u{10b40}', '\u{10b55}'), ('\u{10b60}', '\u{10b72}'), ('\u{10b80}',
++ '\u{10b91}'), ('\u{10c00}', '\u{10c48}'), ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}',
++ '\u{10cf2}'), ('\u{10d00}', '\u{10d27}'), ('\u{10d30}', '\u{10d39}'), ('\u{10f00}',
++ '\u{10f1c}'), ('\u{10f27}', '\u{10f27}'), ('\u{10f30}', '\u{10f50}'), ('\u{10fe0}',
++ '\u{10ff6}'), ('\u{11000}', '\u{11046}'), ('\u{11066}', '\u{1106f}'), ('\u{1107f}',
++ '\u{110ba}'), ('\u{110d0}', '\u{110e8}'), ('\u{110f0}', '\u{110f9}'), ('\u{11100}',
++ '\u{11134}'), ('\u{11136}', '\u{1113f}'), ('\u{11144}', '\u{11146}'), ('\u{11150}',
++ '\u{11173}'), ('\u{11176}', '\u{11176}'), ('\u{11180}', '\u{111c4}'), ('\u{111c9}',
++ '\u{111cc}'), ('\u{111d0}', '\u{111da}'), ('\u{111dc}', '\u{111dc}'), ('\u{11200}',
++ '\u{11211}'), ('\u{11213}', '\u{11237}'), ('\u{1123e}', '\u{1123e}'), ('\u{11280}',
++ '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'), ('\u{1128f}',
++ '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112ea}'), ('\u{112f0}',
++ '\u{112f9}'), ('\u{11300}', '\u{11303}'), ('\u{11305}', '\u{1130c}'), ('\u{1130f}',
++ '\u{11310}'), ('\u{11313}', '\u{11328}'), ('\u{1132a}', '\u{11330}'), ('\u{11332}',
++ '\u{11333}'), ('\u{11335}', '\u{11339}'), ('\u{1133b}', '\u{11344}'), ('\u{11347}',
++ '\u{11348}'), ('\u{1134b}', '\u{1134d}'), ('\u{11350}', '\u{11350}'), ('\u{11357}',
++ '\u{11357}'), ('\u{1135d}', '\u{11363}'), ('\u{11366}', '\u{1136c}'), ('\u{11370}',
++ '\u{11374}'), ('\u{11400}', '\u{1144a}'), ('\u{11450}', '\u{11459}'), ('\u{1145e}',
++ '\u{1145f}'), ('\u{11480}', '\u{114c5}'), ('\u{114c7}', '\u{114c7}'), ('\u{114d0}',
++ '\u{114d9}'), ('\u{11580}', '\u{115b5}'), ('\u{115b8}', '\u{115c0}'), ('\u{115d8}',
++ '\u{115dd}'), ('\u{11600}', '\u{11640}'), ('\u{11644}', '\u{11644}'), ('\u{11650}',
++ '\u{11659}'), ('\u{11680}', '\u{116b8}'), ('\u{116c0}', '\u{116c9}'), ('\u{11700}',
++ '\u{1171a}'), ('\u{1171d}', '\u{1172b}'), ('\u{11730}', '\u{11739}'), ('\u{11800}',
++ '\u{1183a}'), ('\u{118a0}', '\u{118e9}'), ('\u{118ff}', '\u{118ff}'), ('\u{119a0}',
++ '\u{119a7}'), ('\u{119aa}', '\u{119d7}'), ('\u{119da}', '\u{119e1}'), ('\u{119e3}',
++ '\u{119e4}'), ('\u{11a00}', '\u{11a3e}'), ('\u{11a47}', '\u{11a47}'), ('\u{11a50}',
++ '\u{11a99}'), ('\u{11a9d}', '\u{11a9d}'), ('\u{11ac0}', '\u{11af8}'), ('\u{11c00}',
++ '\u{11c08}'), ('\u{11c0a}', '\u{11c36}'), ('\u{11c38}', '\u{11c40}'), ('\u{11c50}',
++ '\u{11c59}'), ('\u{11c72}', '\u{11c8f}'), ('\u{11c92}', '\u{11ca7}'), ('\u{11ca9}',
++ '\u{11cb6}'), ('\u{11d00}', '\u{11d06}'), ('\u{11d08}', '\u{11d09}'), ('\u{11d0b}',
++ '\u{11d36}'), ('\u{11d3a}', '\u{11d3a}'), ('\u{11d3c}', '\u{11d3d}'), ('\u{11d3f}',
++ '\u{11d47}'), ('\u{11d50}', '\u{11d59}'), ('\u{11d60}', '\u{11d65}'), ('\u{11d67}',
++ '\u{11d68}'), ('\u{11d6a}', '\u{11d8e}'), ('\u{11d90}', '\u{11d91}'), ('\u{11d93}',
++ '\u{11d98}'), ('\u{11da0}', '\u{11da9}'), ('\u{11ee0}', '\u{11ef6}'), ('\u{12000}',
++ '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', '\u{12543}'), ('\u{13000}',
++ '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', '\u{16a38}'), ('\u{16a40}',
++ '\u{16a5e}'), ('\u{16a60}', '\u{16a69}'), ('\u{16ad0}', '\u{16aed}'), ('\u{16af0}',
++ '\u{16af4}'), ('\u{16b00}', '\u{16b36}'), ('\u{16b40}', '\u{16b43}'), ('\u{16b50}',
++ '\u{16b59}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', '\u{16b8f}'), ('\u{16e40}',
++ '\u{16e7f}'), ('\u{16f00}', '\u{16f4a}'), ('\u{16f4f}', '\u{16f87}'), ('\u{16f8f}',
++ '\u{16f9f}'), ('\u{16fe0}', '\u{16fe1}'), ('\u{16fe3}', '\u{16fe3}'), ('\u{17000}',
++ '\u{187f7}'), ('\u{18800}', '\u{18af2}'), ('\u{1b000}', '\u{1b11e}'), ('\u{1b150}',
++ '\u{1b152}'), ('\u{1b164}', '\u{1b167}'), ('\u{1b170}', '\u{1b2fb}'), ('\u{1bc00}',
++ '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'), ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}',
++ '\u{1bc99}'), ('\u{1bc9d}', '\u{1bc9e}'), ('\u{1d165}', '\u{1d169}'), ('\u{1d16d}',
++ '\u{1d172}'), ('\u{1d17b}', '\u{1d182}'), ('\u{1d185}', '\u{1d18b}'), ('\u{1d1aa}',
++ '\u{1d1ad}'), ('\u{1d242}', '\u{1d244}'), ('\u{1d400}', '\u{1d454}'), ('\u{1d456}',
++ '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'), ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}',
++ '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'), ('\u{1d4ae}', '\u{1d4b9}'), ('\u{1d4bb}',
++ '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'), ('\u{1d4c5}', '\u{1d505}'), ('\u{1d507}',
++ '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'), ('\u{1d516}', '\u{1d51c}'), ('\u{1d51e}',
++ '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'), ('\u{1d540}', '\u{1d544}'), ('\u{1d546}',
++ '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'), ('\u{1d552}', '\u{1d6a5}'), ('\u{1d6a8}',
++ '\u{1d6c0}'), ('\u{1d6c2}', '\u{1d6da}'), ('\u{1d6dc}', '\u{1d6fa}'), ('\u{1d6fc}',
++ '\u{1d714}'), ('\u{1d716}', '\u{1d734}'), ('\u{1d736}', '\u{1d74e}'), ('\u{1d750}',
++ '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'), ('\u{1d78a}', '\u{1d7a8}'), ('\u{1d7aa}',
++ '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'), ('\u{1d7ce}', '\u{1d7ff}'), ('\u{1da00}',
++ '\u{1da36}'), ('\u{1da3b}', '\u{1da6c}'), ('\u{1da75}', '\u{1da75}'), ('\u{1da84}',
++ '\u{1da84}'), ('\u{1da9b}', '\u{1da9f}'), ('\u{1daa1}', '\u{1daaf}'), ('\u{1e000}',
++ '\u{1e006}'), ('\u{1e008}', '\u{1e018}'), ('\u{1e01b}', '\u{1e021}'), ('\u{1e023}',
++ '\u{1e024}'), ('\u{1e026}', '\u{1e02a}'), ('\u{1e100}', '\u{1e12c}'), ('\u{1e130}',
++ '\u{1e13d}'), ('\u{1e140}', '\u{1e149}'), ('\u{1e14e}', '\u{1e14e}'), ('\u{1e2c0}',
++ '\u{1e2f9}'), ('\u{1e800}', '\u{1e8c4}'), ('\u{1e8d0}', '\u{1e8d6}'), ('\u{1e900}',
++ '\u{1e94b}'), ('\u{1e950}', '\u{1e959}'), ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}',
++ '\u{1ee1f}'), ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}',
++ '\u{1ee27}'), ('\u{1ee29}', '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}',
++ '\u{1ee39}'), ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}',
++ '\u{1ee47}'), ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}',
++ '\u{1ee4f}'), ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}',
++ '\u{1ee57}'), ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}',
++ '\u{1ee5d}'), ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}',
++ '\u{1ee64}'), ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}',
++ '\u{1ee77}'), ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}',
++ '\u{1ee89}'), ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}',
++ '\u{1eea9}'), ('\u{1eeab}', '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}',
++ '\u{2b734}'), ('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), ('\u{2ceb0}',
++ '\u{2ebe0}'), ('\u{2f800}', '\u{2fa1d}'), ('\u{e0100}', '\u{e01ef}')
++ ];
++
++ pub fn XID_Continue(c: char) -> bool {
++ super::bsearch_range_table(c, XID_Continue_table)
++ }
++
++ pub const XID_Start_table: &[(char, char)] = &[
++ ('\u{41}', '\u{5a}'), ('\u{61}', '\u{7a}'), ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'),
++ ('\u{ba}', '\u{ba}'), ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'),
++ ('\u{2c6}', '\u{2d1}'), ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}',
++ '\u{2ee}'), ('\u{370}', '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'),
++ ('\u{37f}', '\u{37f}'), ('\u{386}', '\u{386}'), ('\u{388}', '\u{38a}'), ('\u{38c}',
++ '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}', '\u{3f5}'), ('\u{3f7}', '\u{481}'),
++ ('\u{48a}', '\u{52f}'), ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{560}',
++ '\u{588}'), ('\u{5d0}', '\u{5ea}'), ('\u{5ef}', '\u{5f2}'), ('\u{620}', '\u{64a}'),
++ ('\u{66e}', '\u{66f}'), ('\u{671}', '\u{6d3}'), ('\u{6d5}', '\u{6d5}'), ('\u{6e5}',
++ '\u{6e6}'), ('\u{6ee}', '\u{6ef}'), ('\u{6fa}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'),
++ ('\u{710}', '\u{710}'), ('\u{712}', '\u{72f}'), ('\u{74d}', '\u{7a5}'), ('\u{7b1}',
++ '\u{7b1}'), ('\u{7ca}', '\u{7ea}'), ('\u{7f4}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'),
++ ('\u{800}', '\u{815}'), ('\u{81a}', '\u{81a}'), ('\u{824}', '\u{824}'), ('\u{828}',
++ '\u{828}'), ('\u{840}', '\u{858}'), ('\u{860}', '\u{86a}'), ('\u{8a0}', '\u{8b4}'),
++ ('\u{8b6}', '\u{8bd}'), ('\u{904}', '\u{939}'), ('\u{93d}', '\u{93d}'), ('\u{950}',
++ '\u{950}'), ('\u{958}', '\u{961}'), ('\u{971}', '\u{980}'), ('\u{985}', '\u{98c}'),
++ ('\u{98f}', '\u{990}'), ('\u{993}', '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), ('\u{9b2}',
++ '\u{9b2}'), ('\u{9b6}', '\u{9b9}'), ('\u{9bd}', '\u{9bd}'), ('\u{9ce}', '\u{9ce}'),
++ ('\u{9dc}', '\u{9dd}'), ('\u{9df}', '\u{9e1}'), ('\u{9f0}', '\u{9f1}'), ('\u{9fc}',
++ '\u{9fc}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}', '\u{a10}'), ('\u{a13}', '\u{a28}'),
++ ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'), ('\u{a35}', '\u{a36}'), ('\u{a38}',
++ '\u{a39}'), ('\u{a59}', '\u{a5c}'), ('\u{a5e}', '\u{a5e}'), ('\u{a72}', '\u{a74}'),
++ ('\u{a85}', '\u{a8d}'), ('\u{a8f}', '\u{a91}'), ('\u{a93}', '\u{aa8}'), ('\u{aaa}',
++ '\u{ab0}'), ('\u{ab2}', '\u{ab3}'), ('\u{ab5}', '\u{ab9}'), ('\u{abd}', '\u{abd}'),
++ ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', '\u{ae1}'), ('\u{af9}', '\u{af9}'), ('\u{b05}',
+ '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}', '\u{b28}'), ('\u{b2a}', '\u{b30}'),
+- ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'), ('\u{b3c}', '\u{b44}'), ('\u{b47}',
+- '\u{b48}'), ('\u{b4b}', '\u{b4d}'), ('\u{b56}', '\u{b57}'), ('\u{b5c}', '\u{b5d}'),
+- ('\u{b5f}', '\u{b63}'), ('\u{b66}', '\u{b6f}'), ('\u{b71}', '\u{b71}'), ('\u{b82}',
+- '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'), ('\u{b92}', '\u{b95}'),
+- ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}', '\u{b9f}'), ('\u{ba3}',
+- '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'), ('\u{bbe}', '\u{bc2}'),
+- ('\u{bc6}', '\u{bc8}'), ('\u{bca}', '\u{bcd}'), ('\u{bd0}', '\u{bd0}'), ('\u{bd7}',
+- '\u{bd7}'), ('\u{be6}', '\u{bef}'), ('\u{c00}', '\u{c03}'), ('\u{c05}', '\u{c0c}'),
+- ('\u{c0e}', '\u{c10}'), ('\u{c12}', '\u{c28}'), ('\u{c2a}', '\u{c39}'), ('\u{c3d}',
+- '\u{c44}'), ('\u{c46}', '\u{c48}'), ('\u{c4a}', '\u{c4d}'), ('\u{c55}', '\u{c56}'),
+- ('\u{c58}', '\u{c5a}'), ('\u{c60}', '\u{c63}'), ('\u{c66}', '\u{c6f}'), ('\u{c80}',
+- '\u{c83}'), ('\u{c85}', '\u{c8c}'), ('\u{c8e}', '\u{c90}'), ('\u{c92}', '\u{ca8}'),
+- ('\u{caa}', '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), ('\u{cbc}', '\u{cc4}'), ('\u{cc6}',
+- '\u{cc8}'), ('\u{cca}', '\u{ccd}'), ('\u{cd5}', '\u{cd6}'), ('\u{cde}', '\u{cde}'),
+- ('\u{ce0}', '\u{ce3}'), ('\u{ce6}', '\u{cef}'), ('\u{cf1}', '\u{cf2}'), ('\u{d01}',
+- '\u{d03}'), ('\u{d05}', '\u{d0c}'), ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d3a}'),
+- ('\u{d3d}', '\u{d44}'), ('\u{d46}', '\u{d48}'), ('\u{d4a}', '\u{d4e}'), ('\u{d54}',
+- '\u{d57}'), ('\u{d5f}', '\u{d63}'), ('\u{d66}', '\u{d6f}'), ('\u{d7a}', '\u{d7f}'),
+- ('\u{d82}', '\u{d83}'), ('\u{d85}', '\u{d96}'), ('\u{d9a}', '\u{db1}'), ('\u{db3}',
+- '\u{dbb}'), ('\u{dbd}', '\u{dbd}'), ('\u{dc0}', '\u{dc6}'), ('\u{dca}', '\u{dca}'),
+- ('\u{dcf}', '\u{dd4}'), ('\u{dd6}', '\u{dd6}'), ('\u{dd8}', '\u{ddf}'), ('\u{de6}',
+- '\u{def}'), ('\u{df2}', '\u{df3}'), ('\u{e01}', '\u{e3a}'), ('\u{e40}', '\u{e4e}'),
+- ('\u{e50}', '\u{e59}'), ('\u{e81}', '\u{e82}'), ('\u{e84}', '\u{e84}'), ('\u{e87}',
+- '\u{e88}'), ('\u{e8a}', '\u{e8a}'), ('\u{e8d}', '\u{e8d}'), ('\u{e94}', '\u{e97}'),
+- ('\u{e99}', '\u{e9f}'), ('\u{ea1}', '\u{ea3}'), ('\u{ea5}', '\u{ea5}'), ('\u{ea7}',
+- '\u{ea7}'), ('\u{eaa}', '\u{eab}'), ('\u{ead}', '\u{eb9}'), ('\u{ebb}', '\u{ebd}'),
+- ('\u{ec0}', '\u{ec4}'), ('\u{ec6}', '\u{ec6}'), ('\u{ec8}', '\u{ecd}'), ('\u{ed0}',
+- '\u{ed9}'), ('\u{edc}', '\u{edf}'), ('\u{f00}', '\u{f00}'), ('\u{f18}', '\u{f19}'),
+- ('\u{f20}', '\u{f29}'), ('\u{f35}', '\u{f35}'), ('\u{f37}', '\u{f37}'), ('\u{f39}',
+- '\u{f39}'), ('\u{f3e}', '\u{f47}'), ('\u{f49}', '\u{f6c}'), ('\u{f71}', '\u{f84}'),
+- ('\u{f86}', '\u{f97}'), ('\u{f99}', '\u{fbc}'), ('\u{fc6}', '\u{fc6}'), ('\u{1000}',
+- '\u{1049}'), ('\u{1050}', '\u{109d}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'),
+- ('\u{10cd}', '\u{10cd}'), ('\u{10d0}', '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}',
+- '\u{124d}'), ('\u{1250}', '\u{1256}'), ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'),
+- ('\u{1260}', '\u{1288}'), ('\u{128a}', '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}',
+- '\u{12b5}'), ('\u{12b8}', '\u{12be}'), ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'),
+- ('\u{12c8}', '\u{12d6}'), ('\u{12d8}', '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}',
+- '\u{135a}'), ('\u{135d}', '\u{135f}'), ('\u{1369}', '\u{1371}'), ('\u{1380}', '\u{138f}'),
+- ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), ('\u{1401}', '\u{166c}'), ('\u{166f}',
+- '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', '\u{16ea}'), ('\u{16ee}', '\u{16f8}'),
+- ('\u{1700}', '\u{170c}'), ('\u{170e}', '\u{1714}'), ('\u{1720}', '\u{1734}'), ('\u{1740}',
+- '\u{1753}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', '\u{1770}'), ('\u{1772}', '\u{1773}'),
+- ('\u{1780}', '\u{17d3}'), ('\u{17d7}', '\u{17d7}'), ('\u{17dc}', '\u{17dd}'), ('\u{17e0}',
+- '\u{17e9}'), ('\u{180b}', '\u{180d}'), ('\u{1810}', '\u{1819}'), ('\u{1820}', '\u{1877}'),
+- ('\u{1880}', '\u{18aa}'), ('\u{18b0}', '\u{18f5}'), ('\u{1900}', '\u{191e}'), ('\u{1920}',
+- '\u{192b}'), ('\u{1930}', '\u{193b}'), ('\u{1946}', '\u{196d}'), ('\u{1970}', '\u{1974}'),
+- ('\u{1980}', '\u{19ab}'), ('\u{19b0}', '\u{19c9}'), ('\u{19d0}', '\u{19da}'), ('\u{1a00}',
+- '\u{1a1b}'), ('\u{1a20}', '\u{1a5e}'), ('\u{1a60}', '\u{1a7c}'), ('\u{1a7f}', '\u{1a89}'),
+- ('\u{1a90}', '\u{1a99}'), ('\u{1aa7}', '\u{1aa7}'), ('\u{1ab0}', '\u{1abd}'), ('\u{1b00}',
+- '\u{1b4b}'), ('\u{1b50}', '\u{1b59}'), ('\u{1b6b}', '\u{1b73}'), ('\u{1b80}', '\u{1bf3}'),
+- ('\u{1c00}', '\u{1c37}'), ('\u{1c40}', '\u{1c49}'), ('\u{1c4d}', '\u{1c7d}'), ('\u{1c80}',
+- '\u{1c88}'), ('\u{1cd0}', '\u{1cd2}'), ('\u{1cd4}', '\u{1cf6}'), ('\u{1cf8}', '\u{1cf9}'),
+- ('\u{1d00}', '\u{1df5}'), ('\u{1dfb}', '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}',
+- '\u{1f45}'), ('\u{1f48}', '\u{1f4d}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'),
+- ('\u{1f5b}', '\u{1f5b}'), ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}',
+- '\u{1fb4}'), ('\u{1fb6}', '\u{1fbc}'), ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'),
+- ('\u{1fc6}', '\u{1fcc}'), ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}',
+- '\u{1fec}'), ('\u{1ff2}', '\u{1ff4}'), ('\u{1ff6}', '\u{1ffc}'), ('\u{203f}', '\u{2040}'),
+- ('\u{2054}', '\u{2054}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}',
+- '\u{209c}'), ('\u{20d0}', '\u{20dc}'), ('\u{20e1}', '\u{20e1}'), ('\u{20e5}', '\u{20f0}'),
++ ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'), ('\u{b3d}', '\u{b3d}'), ('\u{b5c}',
++ '\u{b5d}'), ('\u{b5f}', '\u{b61}'), ('\u{b71}', '\u{b71}'), ('\u{b83}', '\u{b83}'),
++ ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'), ('\u{b92}', '\u{b95}'), ('\u{b99}',
++ '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}', '\u{b9f}'), ('\u{ba3}', '\u{ba4}'),
++ ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'), ('\u{bd0}', '\u{bd0}'), ('\u{c05}',
++ '\u{c0c}'), ('\u{c0e}', '\u{c10}'), ('\u{c12}', '\u{c28}'), ('\u{c2a}', '\u{c39}'),
++ ('\u{c3d}', '\u{c3d}'), ('\u{c58}', '\u{c5a}'), ('\u{c60}', '\u{c61}'), ('\u{c80}',
++ '\u{c80}'), ('\u{c85}', '\u{c8c}'), ('\u{c8e}', '\u{c90}'), ('\u{c92}', '\u{ca8}'),
++ ('\u{caa}', '\u{cb3}'), ('\u{cb5}', '\u{cb9}'), ('\u{cbd}', '\u{cbd}'), ('\u{cde}',
++ '\u{cde}'), ('\u{ce0}', '\u{ce1}'), ('\u{cf1}', '\u{cf2}'), ('\u{d05}', '\u{d0c}'),
++ ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d3a}'), ('\u{d3d}', '\u{d3d}'), ('\u{d4e}',
++ '\u{d4e}'), ('\u{d54}', '\u{d56}'), ('\u{d5f}', '\u{d61}'), ('\u{d7a}', '\u{d7f}'),
++ ('\u{d85}', '\u{d96}'), ('\u{d9a}', '\u{db1}'), ('\u{db3}', '\u{dbb}'), ('\u{dbd}',
++ '\u{dbd}'), ('\u{dc0}', '\u{dc6}'), ('\u{e01}', '\u{e30}'), ('\u{e32}', '\u{e32}'),
++ ('\u{e40}', '\u{e46}'), ('\u{e81}', '\u{e82}'), ('\u{e84}', '\u{e84}'), ('\u{e86}',
++ '\u{e8a}'), ('\u{e8c}', '\u{ea3}'), ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', '\u{eb0}'),
++ ('\u{eb2}', '\u{eb2}'), ('\u{ebd}', '\u{ebd}'), ('\u{ec0}', '\u{ec4}'), ('\u{ec6}',
++ '\u{ec6}'), ('\u{edc}', '\u{edf}'), ('\u{f00}', '\u{f00}'), ('\u{f40}', '\u{f47}'),
++ ('\u{f49}', '\u{f6c}'), ('\u{f88}', '\u{f8c}'), ('\u{1000}', '\u{102a}'), ('\u{103f}',
++ '\u{103f}'), ('\u{1050}', '\u{1055}'), ('\u{105a}', '\u{105d}'), ('\u{1061}', '\u{1061}'),
++ ('\u{1065}', '\u{1066}'), ('\u{106e}', '\u{1070}'), ('\u{1075}', '\u{1081}'), ('\u{108e}',
++ '\u{108e}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), ('\u{10cd}', '\u{10cd}'),
++ ('\u{10d0}', '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}', '\u{124d}'), ('\u{1250}',
++ '\u{1256}'), ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'), ('\u{1260}', '\u{1288}'),
++ ('\u{128a}', '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}', '\u{12b5}'), ('\u{12b8}',
++ '\u{12be}'), ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), ('\u{12c8}', '\u{12d6}'),
++ ('\u{12d8}', '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}', '\u{135a}'), ('\u{1380}',
++ '\u{138f}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), ('\u{1401}', '\u{166c}'),
++ ('\u{166f}', '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', '\u{16ea}'), ('\u{16ee}',
++ '\u{16f8}'), ('\u{1700}', '\u{170c}'), ('\u{170e}', '\u{1711}'), ('\u{1720}', '\u{1731}'),
++ ('\u{1740}', '\u{1751}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', '\u{1770}'), ('\u{1780}',
++ '\u{17b3}'), ('\u{17d7}', '\u{17d7}'), ('\u{17dc}', '\u{17dc}'), ('\u{1820}', '\u{1878}'),
++ ('\u{1880}', '\u{18a8}'), ('\u{18aa}', '\u{18aa}'), ('\u{18b0}', '\u{18f5}'), ('\u{1900}',
++ '\u{191e}'), ('\u{1950}', '\u{196d}'), ('\u{1970}', '\u{1974}'), ('\u{1980}', '\u{19ab}'),
++ ('\u{19b0}', '\u{19c9}'), ('\u{1a00}', '\u{1a16}'), ('\u{1a20}', '\u{1a54}'), ('\u{1aa7}',
++ '\u{1aa7}'), ('\u{1b05}', '\u{1b33}'), ('\u{1b45}', '\u{1b4b}'), ('\u{1b83}', '\u{1ba0}'),
++ ('\u{1bae}', '\u{1baf}'), ('\u{1bba}', '\u{1be5}'), ('\u{1c00}', '\u{1c23}'), ('\u{1c4d}',
++ '\u{1c4f}'), ('\u{1c5a}', '\u{1c7d}'), ('\u{1c80}', '\u{1c88}'), ('\u{1c90}', '\u{1cba}'),
++ ('\u{1cbd}', '\u{1cbf}'), ('\u{1ce9}', '\u{1cec}'), ('\u{1cee}', '\u{1cf3}'), ('\u{1cf5}',
++ '\u{1cf6}'), ('\u{1cfa}', '\u{1cfa}'), ('\u{1d00}', '\u{1dbf}'), ('\u{1e00}', '\u{1f15}'),
++ ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', '\u{1f45}'), ('\u{1f48}', '\u{1f4d}'), ('\u{1f50}',
++ '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}', '\u{1f5b}'), ('\u{1f5d}', '\u{1f5d}'),
++ ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', '\u{1fb4}'), ('\u{1fb6}', '\u{1fbc}'), ('\u{1fbe}',
++ '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}', '\u{1fcc}'), ('\u{1fd0}', '\u{1fd3}'),
++ ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', '\u{1fec}'), ('\u{1ff2}', '\u{1ff4}'), ('\u{1ff6}',
++ '\u{1ffc}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'), ('\u{2090}', '\u{209c}'),
+ ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}', '\u{2113}'), ('\u{2115}',
+ '\u{2115}'), ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'), ('\u{2126}', '\u{2126}'),
+ ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}', '\u{213f}'), ('\u{2145}',
+ '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'), ('\u{2c00}', '\u{2c2e}'),
+- ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', '\u{2cf3}'), ('\u{2d00}',
+- '\u{2d25}'), ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'), ('\u{2d30}', '\u{2d67}'),
+- ('\u{2d6f}', '\u{2d6f}'), ('\u{2d7f}', '\u{2d96}'), ('\u{2da0}', '\u{2da6}'), ('\u{2da8}',
+- '\u{2dae}'), ('\u{2db0}', '\u{2db6}'), ('\u{2db8}', '\u{2dbe}'), ('\u{2dc0}', '\u{2dc6}'),
+- ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}', '\u{2dd6}'), ('\u{2dd8}', '\u{2dde}'), ('\u{2de0}',
+- '\u{2dff}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{302f}'), ('\u{3031}', '\u{3035}'),
+- ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{3099}', '\u{309a}'), ('\u{309d}',
+- '\u{309f}'), ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312d}'),
+- ('\u{3131}', '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}',
+- '\u{4db5}'), ('\u{4e00}', '\u{9fd5}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'),
+- ('\u{a500}', '\u{a60c}'), ('\u{a610}', '\u{a62b}'), ('\u{a640}', '\u{a66f}'), ('\u{a674}',
+- '\u{a67d}'), ('\u{a67f}', '\u{a6f1}'), ('\u{a717}', '\u{a71f}'), ('\u{a722}', '\u{a788}'),
+- ('\u{a78b}', '\u{a7ae}'), ('\u{a7b0}', '\u{a7b7}'), ('\u{a7f7}', '\u{a827}'), ('\u{a840}',
+- '\u{a873}'), ('\u{a880}', '\u{a8c5}'), ('\u{a8d0}', '\u{a8d9}'), ('\u{a8e0}', '\u{a8f7}'),
+- ('\u{a8fb}', '\u{a8fb}'), ('\u{a8fd}', '\u{a8fd}'), ('\u{a900}', '\u{a92d}'), ('\u{a930}',
+- '\u{a953}'), ('\u{a960}', '\u{a97c}'), ('\u{a980}', '\u{a9c0}'), ('\u{a9cf}', '\u{a9d9}'),
+- ('\u{a9e0}', '\u{a9fe}'), ('\u{aa00}', '\u{aa36}'), ('\u{aa40}', '\u{aa4d}'), ('\u{aa50}',
+- '\u{aa59}'), ('\u{aa60}', '\u{aa76}'), ('\u{aa7a}', '\u{aac2}'), ('\u{aadb}', '\u{aadd}'),
+- ('\u{aae0}', '\u{aaef}'), ('\u{aaf2}', '\u{aaf6}'), ('\u{ab01}', '\u{ab06}'), ('\u{ab09}',
+- '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), ('\u{ab28}', '\u{ab2e}'),
+- ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', '\u{ab65}'), ('\u{ab70}', '\u{abea}'), ('\u{abec}',
+- '\u{abed}'), ('\u{abf0}', '\u{abf9}'), ('\u{ac00}', '\u{d7a3}'), ('\u{d7b0}', '\u{d7c6}'),
+- ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}', '\u{fa6d}'), ('\u{fa70}', '\u{fad9}'), ('\u{fb00}',
+- '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), ('\u{fb1d}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'),
+- ('\u{fb38}', '\u{fb3c}'), ('\u{fb3e}', '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}',
+- '\u{fb44}'), ('\u{fb46}', '\u{fbb1}'), ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'),
+- ('\u{fd50}', '\u{fd8f}'), ('\u{fd92}', '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe00}',
+- '\u{fe0f}'), ('\u{fe20}', '\u{fe2f}'), ('\u{fe33}', '\u{fe34}'), ('\u{fe4d}', '\u{fe4f}'),
+- ('\u{fe71}', '\u{fe71}'), ('\u{fe73}', '\u{fe73}'), ('\u{fe77}', '\u{fe77}'), ('\u{fe79}',
+- '\u{fe79}'), ('\u{fe7b}', '\u{fe7b}'), ('\u{fe7d}', '\u{fe7d}'), ('\u{fe7f}', '\u{fefc}'),
+- ('\u{ff10}', '\u{ff19}'), ('\u{ff21}', '\u{ff3a}'), ('\u{ff3f}', '\u{ff3f}'), ('\u{ff41}',
+- '\u{ff5a}'), ('\u{ff66}', '\u{ffbe}'), ('\u{ffc2}', '\u{ffc7}'), ('\u{ffca}', '\u{ffcf}'),
+- ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'), ('\u{10000}', '\u{1000b}'),
++ ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}', '\u{2cee}'), ('\u{2cf2}',
++ '\u{2cf3}'), ('\u{2d00}', '\u{2d25}'), ('\u{2d27}', '\u{2d27}'), ('\u{2d2d}', '\u{2d2d}'),
++ ('\u{2d30}', '\u{2d67}'), ('\u{2d6f}', '\u{2d6f}'), ('\u{2d80}', '\u{2d96}'), ('\u{2da0}',
++ '\u{2da6}'), ('\u{2da8}', '\u{2dae}'), ('\u{2db0}', '\u{2db6}'), ('\u{2db8}', '\u{2dbe}'),
++ ('\u{2dc0}', '\u{2dc6}'), ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}', '\u{2dd6}'), ('\u{2dd8}',
++ '\u{2dde}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{3029}'), ('\u{3031}', '\u{3035}'),
++ ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{309d}', '\u{309f}'), ('\u{30a1}',
++ '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312f}'), ('\u{3131}', '\u{318e}'),
++ ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}', '\u{4db5}'), ('\u{4e00}',
++ '\u{9fef}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'), ('\u{a500}', '\u{a60c}'),
++ ('\u{a610}', '\u{a61f}'), ('\u{a62a}', '\u{a62b}'), ('\u{a640}', '\u{a66e}'), ('\u{a67f}',
++ '\u{a69d}'), ('\u{a6a0}', '\u{a6ef}'), ('\u{a717}', '\u{a71f}'), ('\u{a722}', '\u{a788}'),
++ ('\u{a78b}', '\u{a7bf}'), ('\u{a7c2}', '\u{a7c6}'), ('\u{a7f7}', '\u{a801}'), ('\u{a803}',
++ '\u{a805}'), ('\u{a807}', '\u{a80a}'), ('\u{a80c}', '\u{a822}'), ('\u{a840}', '\u{a873}'),
++ ('\u{a882}', '\u{a8b3}'), ('\u{a8f2}', '\u{a8f7}'), ('\u{a8fb}', '\u{a8fb}'), ('\u{a8fd}',
++ '\u{a8fe}'), ('\u{a90a}', '\u{a925}'), ('\u{a930}', '\u{a946}'), ('\u{a960}', '\u{a97c}'),
++ ('\u{a984}', '\u{a9b2}'), ('\u{a9cf}', '\u{a9cf}'), ('\u{a9e0}', '\u{a9e4}'), ('\u{a9e6}',
++ '\u{a9ef}'), ('\u{a9fa}', '\u{a9fe}'), ('\u{aa00}', '\u{aa28}'), ('\u{aa40}', '\u{aa42}'),
++ ('\u{aa44}', '\u{aa4b}'), ('\u{aa60}', '\u{aa76}'), ('\u{aa7a}', '\u{aa7a}'), ('\u{aa7e}',
++ '\u{aaaf}'), ('\u{aab1}', '\u{aab1}'), ('\u{aab5}', '\u{aab6}'), ('\u{aab9}', '\u{aabd}'),
++ ('\u{aac0}', '\u{aac0}'), ('\u{aac2}', '\u{aac2}'), ('\u{aadb}', '\u{aadd}'), ('\u{aae0}',
++ '\u{aaea}'), ('\u{aaf2}', '\u{aaf4}'), ('\u{ab01}', '\u{ab06}'), ('\u{ab09}', '\u{ab0e}'),
++ ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), ('\u{ab28}', '\u{ab2e}'), ('\u{ab30}',
++ '\u{ab5a}'), ('\u{ab5c}', '\u{ab67}'), ('\u{ab70}', '\u{abe2}'), ('\u{ac00}', '\u{d7a3}'),
++ ('\u{d7b0}', '\u{d7c6}'), ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}', '\u{fa6d}'), ('\u{fa70}',
++ '\u{fad9}'), ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'), ('\u{fb1d}', '\u{fb1d}'),
++ ('\u{fb1f}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'), ('\u{fb38}', '\u{fb3c}'), ('\u{fb3e}',
++ '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}', '\u{fb44}'), ('\u{fb46}', '\u{fbb1}'),
++ ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'), ('\u{fd50}', '\u{fd8f}'), ('\u{fd92}',
++ '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe71}', '\u{fe71}'), ('\u{fe73}', '\u{fe73}'),
++ ('\u{fe77}', '\u{fe77}'), ('\u{fe79}', '\u{fe79}'), ('\u{fe7b}', '\u{fe7b}'), ('\u{fe7d}',
++ '\u{fe7d}'), ('\u{fe7f}', '\u{fefc}'), ('\u{ff21}', '\u{ff3a}'), ('\u{ff41}', '\u{ff5a}'),
++ ('\u{ff66}', '\u{ff9d}'), ('\u{ffa0}', '\u{ffbe}'), ('\u{ffc2}', '\u{ffc7}'), ('\u{ffca}',
++ '\u{ffcf}'), ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'), ('\u{10000}', '\u{1000b}'),
+ ('\u{1000d}', '\u{10026}'), ('\u{10028}', '\u{1003a}'), ('\u{1003c}', '\u{1003d}'),
+ ('\u{1003f}', '\u{1004d}'), ('\u{10050}', '\u{1005d}'), ('\u{10080}', '\u{100fa}'),
+- ('\u{10140}', '\u{10174}'), ('\u{101fd}', '\u{101fd}'), ('\u{10280}', '\u{1029c}'),
+- ('\u{102a0}', '\u{102d0}'), ('\u{102e0}', '\u{102e0}'), ('\u{10300}', '\u{1031f}'),
+- ('\u{10330}', '\u{1034a}'), ('\u{10350}', '\u{1037a}'), ('\u{10380}', '\u{1039d}'),
+- ('\u{103a0}', '\u{103c3}'), ('\u{103c8}', '\u{103cf}'), ('\u{103d1}', '\u{103d5}'),
+- ('\u{10400}', '\u{1049d}'), ('\u{104a0}', '\u{104a9}'), ('\u{104b0}', '\u{104d3}'),
++ ('\u{10140}', '\u{10174}'), ('\u{10280}', '\u{1029c}'), ('\u{102a0}', '\u{102d0}'),
++ ('\u{10300}', '\u{1031f}'), ('\u{1032d}', '\u{1034a}'), ('\u{10350}', '\u{10375}'),
++ ('\u{10380}', '\u{1039d}'), ('\u{103a0}', '\u{103c3}'), ('\u{103c8}', '\u{103cf}'),
++ ('\u{103d1}', '\u{103d5}'), ('\u{10400}', '\u{1049d}'), ('\u{104b0}', '\u{104d3}'),
+ ('\u{104d8}', '\u{104fb}'), ('\u{10500}', '\u{10527}'), ('\u{10530}', '\u{10563}'),
+ ('\u{10600}', '\u{10736}'), ('\u{10740}', '\u{10755}'), ('\u{10760}', '\u{10767}'),
+ ('\u{10800}', '\u{10805}'), ('\u{10808}', '\u{10808}'), ('\u{1080a}', '\u{10835}'),
+ ('\u{10837}', '\u{10838}'), ('\u{1083c}', '\u{1083c}'), ('\u{1083f}', '\u{10855}'),
+ ('\u{10860}', '\u{10876}'), ('\u{10880}', '\u{1089e}'), ('\u{108e0}', '\u{108f2}'),
+ ('\u{108f4}', '\u{108f5}'), ('\u{10900}', '\u{10915}'), ('\u{10920}', '\u{10939}'),
+- ('\u{10980}', '\u{109b7}'), ('\u{109be}', '\u{109bf}'), ('\u{10a00}', '\u{10a03}'),
+- ('\u{10a05}', '\u{10a06}'), ('\u{10a0c}', '\u{10a13}'), ('\u{10a15}', '\u{10a17}'),
+- ('\u{10a19}', '\u{10a33}'), ('\u{10a38}', '\u{10a3a}'), ('\u{10a3f}', '\u{10a3f}'),
++ ('\u{10980}', '\u{109b7}'), ('\u{109be}', '\u{109bf}'), ('\u{10a00}', '\u{10a00}'),
++ ('\u{10a10}', '\u{10a13}'), ('\u{10a15}', '\u{10a17}'), ('\u{10a19}', '\u{10a35}'),
+ ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}', '\u{10a9c}'), ('\u{10ac0}', '\u{10ac7}'),
+- ('\u{10ac9}', '\u{10ae6}'), ('\u{10b00}', '\u{10b35}'), ('\u{10b40}', '\u{10b55}'),
++ ('\u{10ac9}', '\u{10ae4}'), ('\u{10b00}', '\u{10b35}'), ('\u{10b40}', '\u{10b55}'),
+ ('\u{10b60}', '\u{10b72}'), ('\u{10b80}', '\u{10b91}'), ('\u{10c00}', '\u{10c48}'),
+- ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'), ('\u{11000}', '\u{11046}'),
+- ('\u{11066}', '\u{1106f}'), ('\u{1107f}', '\u{110ba}'), ('\u{110d0}', '\u{110e8}'),
+- ('\u{110f0}', '\u{110f9}'), ('\u{11100}', '\u{11134}'), ('\u{11136}', '\u{1113f}'),
+- ('\u{11150}', '\u{11173}'), ('\u{11176}', '\u{11176}'), ('\u{11180}', '\u{111c4}'),
+- ('\u{111ca}', '\u{111cc}'), ('\u{111d0}', '\u{111da}'), ('\u{111dc}', '\u{111dc}'),
+- ('\u{11200}', '\u{11211}'), ('\u{11213}', '\u{11237}'), ('\u{1123e}', '\u{1123e}'),
+- ('\u{11280}', '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'),
+- ('\u{1128f}', '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112ea}'),
+- ('\u{112f0}', '\u{112f9}'), ('\u{11300}', '\u{11303}'), ('\u{11305}', '\u{1130c}'),
++ ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'), ('\u{10d00}', '\u{10d23}'),
++ ('\u{10f00}', '\u{10f1c}'), ('\u{10f27}', '\u{10f27}'), ('\u{10f30}', '\u{10f45}'),
++ ('\u{10fe0}', '\u{10ff6}'), ('\u{11003}', '\u{11037}'), ('\u{11083}', '\u{110af}'),
++ ('\u{110d0}', '\u{110e8}'), ('\u{11103}', '\u{11126}'), ('\u{11144}', '\u{11144}'),
++ ('\u{11150}', '\u{11172}'), ('\u{11176}', '\u{11176}'), ('\u{11183}', '\u{111b2}'),
++ ('\u{111c1}', '\u{111c4}'), ('\u{111da}', '\u{111da}'), ('\u{111dc}', '\u{111dc}'),
++ ('\u{11200}', '\u{11211}'), ('\u{11213}', '\u{1122b}'), ('\u{11280}', '\u{11286}'),
++ ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'), ('\u{1128f}', '\u{1129d}'),
++ ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112de}'), ('\u{11305}', '\u{1130c}'),
+ ('\u{1130f}', '\u{11310}'), ('\u{11313}', '\u{11328}'), ('\u{1132a}', '\u{11330}'),
+- ('\u{11332}', '\u{11333}'), ('\u{11335}', '\u{11339}'), ('\u{1133c}', '\u{11344}'),
+- ('\u{11347}', '\u{11348}'), ('\u{1134b}', '\u{1134d}'), ('\u{11350}', '\u{11350}'),
+- ('\u{11357}', '\u{11357}'), ('\u{1135d}', '\u{11363}'), ('\u{11366}', '\u{1136c}'),
+- ('\u{11370}', '\u{11374}'), ('\u{11400}', '\u{1144a}'), ('\u{11450}', '\u{11459}'),
+- ('\u{11480}', '\u{114c5}'), ('\u{114c7}', '\u{114c7}'), ('\u{114d0}', '\u{114d9}'),
+- ('\u{11580}', '\u{115b5}'), ('\u{115b8}', '\u{115c0}'), ('\u{115d8}', '\u{115dd}'),
+- ('\u{11600}', '\u{11640}'), ('\u{11644}', '\u{11644}'), ('\u{11650}', '\u{11659}'),
+- ('\u{11680}', '\u{116b7}'), ('\u{116c0}', '\u{116c9}'), ('\u{11700}', '\u{11719}'),
+- ('\u{1171d}', '\u{1172b}'), ('\u{11730}', '\u{11739}'), ('\u{118a0}', '\u{118e9}'),
+- ('\u{118ff}', '\u{118ff}'), ('\u{11ac0}', '\u{11af8}'), ('\u{11c00}', '\u{11c08}'),
+- ('\u{11c0a}', '\u{11c36}'), ('\u{11c38}', '\u{11c40}'), ('\u{11c50}', '\u{11c59}'),
+- ('\u{11c72}', '\u{11c8f}'), ('\u{11c92}', '\u{11ca7}'), ('\u{11ca9}', '\u{11cb6}'),
+- ('\u{12000}', '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', '\u{12543}'),
+- ('\u{13000}', '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', '\u{16a38}'),
+- ('\u{16a40}', '\u{16a5e}'), ('\u{16a60}', '\u{16a69}'), ('\u{16ad0}', '\u{16aed}'),
+- ('\u{16af0}', '\u{16af4}'), ('\u{16b00}', '\u{16b36}'), ('\u{16b40}', '\u{16b43}'),
+- ('\u{16b50}', '\u{16b59}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', '\u{16b8f}'),
+- ('\u{16f00}', '\u{16f44}'), ('\u{16f50}', '\u{16f7e}'), ('\u{16f8f}', '\u{16f9f}'),
+- ('\u{16fe0}', '\u{16fe0}'), ('\u{17000}', '\u{187ec}'), ('\u{18800}', '\u{18af2}'),
+- ('\u{1b000}', '\u{1b001}'), ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'),
+- ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}', '\u{1bc99}'), ('\u{1bc9d}', '\u{1bc9e}'),
+- ('\u{1d165}', '\u{1d169}'), ('\u{1d16d}', '\u{1d172}'), ('\u{1d17b}', '\u{1d182}'),
+- ('\u{1d185}', '\u{1d18b}'), ('\u{1d1aa}', '\u{1d1ad}'), ('\u{1d242}', '\u{1d244}'),
+- ('\u{1d400}', '\u{1d454}'), ('\u{1d456}', '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'),
+- ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'),
+- ('\u{1d4ae}', '\u{1d4b9}'), ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'),
+- ('\u{1d4c5}', '\u{1d505}'), ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'),
+- ('\u{1d516}', '\u{1d51c}'), ('\u{1d51e}', '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'),
+- ('\u{1d540}', '\u{1d544}'), ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'),
+- ('\u{1d552}', '\u{1d6a5}'), ('\u{1d6a8}', '\u{1d6c0}'), ('\u{1d6c2}', '\u{1d6da}'),
+- ('\u{1d6dc}', '\u{1d6fa}'), ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', '\u{1d734}'),
+- ('\u{1d736}', '\u{1d74e}'), ('\u{1d750}', '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'),
+- ('\u{1d78a}', '\u{1d7a8}'), ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'),
+- ('\u{1d7ce}', '\u{1d7ff}'), ('\u{1da00}', '\u{1da36}'), ('\u{1da3b}', '\u{1da6c}'),
+- ('\u{1da75}', '\u{1da75}'), ('\u{1da84}', '\u{1da84}'), ('\u{1da9b}', '\u{1da9f}'),
+- ('\u{1daa1}', '\u{1daaf}'), ('\u{1e000}', '\u{1e006}'), ('\u{1e008}', '\u{1e018}'),
+- ('\u{1e01b}', '\u{1e021}'), ('\u{1e023}', '\u{1e024}'), ('\u{1e026}', '\u{1e02a}'),
+- ('\u{1e800}', '\u{1e8c4}'), ('\u{1e8d0}', '\u{1e8d6}'), ('\u{1e900}', '\u{1e94a}'),
+- ('\u{1e950}', '\u{1e959}'), ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}', '\u{1ee1f}'),
+- ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}', '\u{1ee27}'),
+- ('\u{1ee29}', '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}', '\u{1ee39}'),
+- ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}', '\u{1ee47}'),
+- ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}', '\u{1ee4f}'),
+- ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}', '\u{1ee57}'),
+- ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}', '\u{1ee5d}'),
+- ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}', '\u{1ee64}'),
+- ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}', '\u{1ee77}'),
+- ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}', '\u{1ee89}'),
+- ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}', '\u{1eea9}'),
+- ('\u{1eeab}', '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}', '\u{2b734}'),
+- ('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), ('\u{2f800}', '\u{2fa1d}'),
+- ('\u{e0100}', '\u{e01ef}')
+- ];
+-
+- pub fn XID_Continue(c: char) -> bool {
+- super::bsearch_range_table(c, XID_Continue_table)
+- }
+-
+- pub const XID_Start_table: &'static [(char, char)] = &[
+- ('\u{41}', '\u{5a}'), ('\u{61}', '\u{7a}'), ('\u{aa}', '\u{aa}'), ('\u{b5}', '\u{b5}'),
+- ('\u{ba}', '\u{ba}'), ('\u{c0}', '\u{d6}'), ('\u{d8}', '\u{f6}'), ('\u{f8}', '\u{2c1}'),
+- ('\u{2c6}', '\u{2d1}'), ('\u{2e0}', '\u{2e4}'), ('\u{2ec}', '\u{2ec}'), ('\u{2ee}',
+- '\u{2ee}'), ('\u{370}', '\u{374}'), ('\u{376}', '\u{377}'), ('\u{37b}', '\u{37d}'),
+- ('\u{37f}', '\u{37f}'), ('\u{386}', '\u{386}'), ('\u{388}', '\u{38a}'), ('\u{38c}',
+- '\u{38c}'), ('\u{38e}', '\u{3a1}'), ('\u{3a3}', '\u{3f5}'), ('\u{3f7}', '\u{481}'),
+- ('\u{48a}', '\u{52f}'), ('\u{531}', '\u{556}'), ('\u{559}', '\u{559}'), ('\u{561}',
+- '\u{587}'), ('\u{5d0}', '\u{5ea}'), ('\u{5f0}', '\u{5f2}'), ('\u{620}', '\u{64a}'),
+- ('\u{66e}', '\u{66f}'), ('\u{671}', '\u{6d3}'), ('\u{6d5}', '\u{6d5}'), ('\u{6e5}',
+- '\u{6e6}'), ('\u{6ee}', '\u{6ef}'), ('\u{6fa}', '\u{6fc}'), ('\u{6ff}', '\u{6ff}'),
+- ('\u{710}', '\u{710}'), ('\u{712}', '\u{72f}'), ('\u{74d}', '\u{7a5}'), ('\u{7b1}',
+- '\u{7b1}'), ('\u{7ca}', '\u{7ea}'), ('\u{7f4}', '\u{7f5}'), ('\u{7fa}', '\u{7fa}'),
+- ('\u{800}', '\u{815}'), ('\u{81a}', '\u{81a}'), ('\u{824}', '\u{824}'), ('\u{828}',
+- '\u{828}'), ('\u{840}', '\u{858}'), ('\u{8a0}', '\u{8b4}'), ('\u{8b6}', '\u{8bd}'),
+- ('\u{904}', '\u{939}'), ('\u{93d}', '\u{93d}'), ('\u{950}', '\u{950}'), ('\u{958}',
+- '\u{961}'), ('\u{971}', '\u{980}'), ('\u{985}', '\u{98c}'), ('\u{98f}', '\u{990}'),
+- ('\u{993}', '\u{9a8}'), ('\u{9aa}', '\u{9b0}'), ('\u{9b2}', '\u{9b2}'), ('\u{9b6}',
+- '\u{9b9}'), ('\u{9bd}', '\u{9bd}'), ('\u{9ce}', '\u{9ce}'), ('\u{9dc}', '\u{9dd}'),
+- ('\u{9df}', '\u{9e1}'), ('\u{9f0}', '\u{9f1}'), ('\u{a05}', '\u{a0a}'), ('\u{a0f}',
+- '\u{a10}'), ('\u{a13}', '\u{a28}'), ('\u{a2a}', '\u{a30}'), ('\u{a32}', '\u{a33}'),
+- ('\u{a35}', '\u{a36}'), ('\u{a38}', '\u{a39}'), ('\u{a59}', '\u{a5c}'), ('\u{a5e}',
+- '\u{a5e}'), ('\u{a72}', '\u{a74}'), ('\u{a85}', '\u{a8d}'), ('\u{a8f}', '\u{a91}'),
+- ('\u{a93}', '\u{aa8}'), ('\u{aaa}', '\u{ab0}'), ('\u{ab2}', '\u{ab3}'), ('\u{ab5}',
+- '\u{ab9}'), ('\u{abd}', '\u{abd}'), ('\u{ad0}', '\u{ad0}'), ('\u{ae0}', '\u{ae1}'),
+- ('\u{af9}', '\u{af9}'), ('\u{b05}', '\u{b0c}'), ('\u{b0f}', '\u{b10}'), ('\u{b13}',
+- '\u{b28}'), ('\u{b2a}', '\u{b30}'), ('\u{b32}', '\u{b33}'), ('\u{b35}', '\u{b39}'),
+- ('\u{b3d}', '\u{b3d}'), ('\u{b5c}', '\u{b5d}'), ('\u{b5f}', '\u{b61}'), ('\u{b71}',
+- '\u{b71}'), ('\u{b83}', '\u{b83}'), ('\u{b85}', '\u{b8a}'), ('\u{b8e}', '\u{b90}'),
+- ('\u{b92}', '\u{b95}'), ('\u{b99}', '\u{b9a}'), ('\u{b9c}', '\u{b9c}'), ('\u{b9e}',
+- '\u{b9f}'), ('\u{ba3}', '\u{ba4}'), ('\u{ba8}', '\u{baa}'), ('\u{bae}', '\u{bb9}'),
+- ('\u{bd0}', '\u{bd0}'), ('\u{c05}', '\u{c0c}'), ('\u{c0e}', '\u{c10}'), ('\u{c12}',
+- '\u{c28}'), ('\u{c2a}', '\u{c39}'), ('\u{c3d}', '\u{c3d}'), ('\u{c58}', '\u{c5a}'),
+- ('\u{c60}', '\u{c61}'), ('\u{c80}', '\u{c80}'), ('\u{c85}', '\u{c8c}'), ('\u{c8e}',
+- '\u{c90}'), ('\u{c92}', '\u{ca8}'), ('\u{caa}', '\u{cb3}'), ('\u{cb5}', '\u{cb9}'),
+- ('\u{cbd}', '\u{cbd}'), ('\u{cde}', '\u{cde}'), ('\u{ce0}', '\u{ce1}'), ('\u{cf1}',
+- '\u{cf2}'), ('\u{d05}', '\u{d0c}'), ('\u{d0e}', '\u{d10}'), ('\u{d12}', '\u{d3a}'),
+- ('\u{d3d}', '\u{d3d}'), ('\u{d4e}', '\u{d4e}'), ('\u{d54}', '\u{d56}'), ('\u{d5f}',
+- '\u{d61}'), ('\u{d7a}', '\u{d7f}'), ('\u{d85}', '\u{d96}'), ('\u{d9a}', '\u{db1}'),
+- ('\u{db3}', '\u{dbb}'), ('\u{dbd}', '\u{dbd}'), ('\u{dc0}', '\u{dc6}'), ('\u{e01}',
+- '\u{e30}'), ('\u{e32}', '\u{e32}'), ('\u{e40}', '\u{e46}'), ('\u{e81}', '\u{e82}'),
+- ('\u{e84}', '\u{e84}'), ('\u{e87}', '\u{e88}'), ('\u{e8a}', '\u{e8a}'), ('\u{e8d}',
+- '\u{e8d}'), ('\u{e94}', '\u{e97}'), ('\u{e99}', '\u{e9f}'), ('\u{ea1}', '\u{ea3}'),
+- ('\u{ea5}', '\u{ea5}'), ('\u{ea7}', '\u{ea7}'), ('\u{eaa}', '\u{eab}'), ('\u{ead}',
+- '\u{eb0}'), ('\u{eb2}', '\u{eb2}'), ('\u{ebd}', '\u{ebd}'), ('\u{ec0}', '\u{ec4}'),
+- ('\u{ec6}', '\u{ec6}'), ('\u{edc}', '\u{edf}'), ('\u{f00}', '\u{f00}'), ('\u{f40}',
+- '\u{f47}'), ('\u{f49}', '\u{f6c}'), ('\u{f88}', '\u{f8c}'), ('\u{1000}', '\u{102a}'),
+- ('\u{103f}', '\u{103f}'), ('\u{1050}', '\u{1055}'), ('\u{105a}', '\u{105d}'), ('\u{1061}',
+- '\u{1061}'), ('\u{1065}', '\u{1066}'), ('\u{106e}', '\u{1070}'), ('\u{1075}', '\u{1081}'),
+- ('\u{108e}', '\u{108e}'), ('\u{10a0}', '\u{10c5}'), ('\u{10c7}', '\u{10c7}'), ('\u{10cd}',
+- '\u{10cd}'), ('\u{10d0}', '\u{10fa}'), ('\u{10fc}', '\u{1248}'), ('\u{124a}', '\u{124d}'),
+- ('\u{1250}', '\u{1256}'), ('\u{1258}', '\u{1258}'), ('\u{125a}', '\u{125d}'), ('\u{1260}',
+- '\u{1288}'), ('\u{128a}', '\u{128d}'), ('\u{1290}', '\u{12b0}'), ('\u{12b2}', '\u{12b5}'),
+- ('\u{12b8}', '\u{12be}'), ('\u{12c0}', '\u{12c0}'), ('\u{12c2}', '\u{12c5}'), ('\u{12c8}',
+- '\u{12d6}'), ('\u{12d8}', '\u{1310}'), ('\u{1312}', '\u{1315}'), ('\u{1318}', '\u{135a}'),
+- ('\u{1380}', '\u{138f}'), ('\u{13a0}', '\u{13f5}'), ('\u{13f8}', '\u{13fd}'), ('\u{1401}',
+- '\u{166c}'), ('\u{166f}', '\u{167f}'), ('\u{1681}', '\u{169a}'), ('\u{16a0}', '\u{16ea}'),
+- ('\u{16ee}', '\u{16f8}'), ('\u{1700}', '\u{170c}'), ('\u{170e}', '\u{1711}'), ('\u{1720}',
+- '\u{1731}'), ('\u{1740}', '\u{1751}'), ('\u{1760}', '\u{176c}'), ('\u{176e}', '\u{1770}'),
+- ('\u{1780}', '\u{17b3}'), ('\u{17d7}', '\u{17d7}'), ('\u{17dc}', '\u{17dc}'), ('\u{1820}',
+- '\u{1877}'), ('\u{1880}', '\u{18a8}'), ('\u{18aa}', '\u{18aa}'), ('\u{18b0}', '\u{18f5}'),
+- ('\u{1900}', '\u{191e}'), ('\u{1950}', '\u{196d}'), ('\u{1970}', '\u{1974}'), ('\u{1980}',
+- '\u{19ab}'), ('\u{19b0}', '\u{19c9}'), ('\u{1a00}', '\u{1a16}'), ('\u{1a20}', '\u{1a54}'),
+- ('\u{1aa7}', '\u{1aa7}'), ('\u{1b05}', '\u{1b33}'), ('\u{1b45}', '\u{1b4b}'), ('\u{1b83}',
+- '\u{1ba0}'), ('\u{1bae}', '\u{1baf}'), ('\u{1bba}', '\u{1be5}'), ('\u{1c00}', '\u{1c23}'),
+- ('\u{1c4d}', '\u{1c4f}'), ('\u{1c5a}', '\u{1c7d}'), ('\u{1c80}', '\u{1c88}'), ('\u{1ce9}',
+- '\u{1cec}'), ('\u{1cee}', '\u{1cf1}'), ('\u{1cf5}', '\u{1cf6}'), ('\u{1d00}', '\u{1dbf}'),
+- ('\u{1e00}', '\u{1f15}'), ('\u{1f18}', '\u{1f1d}'), ('\u{1f20}', '\u{1f45}'), ('\u{1f48}',
+- '\u{1f4d}'), ('\u{1f50}', '\u{1f57}'), ('\u{1f59}', '\u{1f59}'), ('\u{1f5b}', '\u{1f5b}'),
+- ('\u{1f5d}', '\u{1f5d}'), ('\u{1f5f}', '\u{1f7d}'), ('\u{1f80}', '\u{1fb4}'), ('\u{1fb6}',
+- '\u{1fbc}'), ('\u{1fbe}', '\u{1fbe}'), ('\u{1fc2}', '\u{1fc4}'), ('\u{1fc6}', '\u{1fcc}'),
+- ('\u{1fd0}', '\u{1fd3}'), ('\u{1fd6}', '\u{1fdb}'), ('\u{1fe0}', '\u{1fec}'), ('\u{1ff2}',
+- '\u{1ff4}'), ('\u{1ff6}', '\u{1ffc}'), ('\u{2071}', '\u{2071}'), ('\u{207f}', '\u{207f}'),
+- ('\u{2090}', '\u{209c}'), ('\u{2102}', '\u{2102}'), ('\u{2107}', '\u{2107}'), ('\u{210a}',
+- '\u{2113}'), ('\u{2115}', '\u{2115}'), ('\u{2118}', '\u{211d}'), ('\u{2124}', '\u{2124}'),
+- ('\u{2126}', '\u{2126}'), ('\u{2128}', '\u{2128}'), ('\u{212a}', '\u{2139}'), ('\u{213c}',
+- '\u{213f}'), ('\u{2145}', '\u{2149}'), ('\u{214e}', '\u{214e}'), ('\u{2160}', '\u{2188}'),
+- ('\u{2c00}', '\u{2c2e}'), ('\u{2c30}', '\u{2c5e}'), ('\u{2c60}', '\u{2ce4}'), ('\u{2ceb}',
+- '\u{2cee}'), ('\u{2cf2}', '\u{2cf3}'), ('\u{2d00}', '\u{2d25}'), ('\u{2d27}', '\u{2d27}'),
+- ('\u{2d2d}', '\u{2d2d}'), ('\u{2d30}', '\u{2d67}'), ('\u{2d6f}', '\u{2d6f}'), ('\u{2d80}',
+- '\u{2d96}'), ('\u{2da0}', '\u{2da6}'), ('\u{2da8}', '\u{2dae}'), ('\u{2db0}', '\u{2db6}'),
+- ('\u{2db8}', '\u{2dbe}'), ('\u{2dc0}', '\u{2dc6}'), ('\u{2dc8}', '\u{2dce}'), ('\u{2dd0}',
+- '\u{2dd6}'), ('\u{2dd8}', '\u{2dde}'), ('\u{3005}', '\u{3007}'), ('\u{3021}', '\u{3029}'),
+- ('\u{3031}', '\u{3035}'), ('\u{3038}', '\u{303c}'), ('\u{3041}', '\u{3096}'), ('\u{309d}',
+- '\u{309f}'), ('\u{30a1}', '\u{30fa}'), ('\u{30fc}', '\u{30ff}'), ('\u{3105}', '\u{312d}'),
+- ('\u{3131}', '\u{318e}'), ('\u{31a0}', '\u{31ba}'), ('\u{31f0}', '\u{31ff}'), ('\u{3400}',
+- '\u{4db5}'), ('\u{4e00}', '\u{9fd5}'), ('\u{a000}', '\u{a48c}'), ('\u{a4d0}', '\u{a4fd}'),
+- ('\u{a500}', '\u{a60c}'), ('\u{a610}', '\u{a61f}'), ('\u{a62a}', '\u{a62b}'), ('\u{a640}',
+- '\u{a66e}'), ('\u{a67f}', '\u{a69d}'), ('\u{a6a0}', '\u{a6ef}'), ('\u{a717}', '\u{a71f}'),
+- ('\u{a722}', '\u{a788}'), ('\u{a78b}', '\u{a7ae}'), ('\u{a7b0}', '\u{a7b7}'), ('\u{a7f7}',
+- '\u{a801}'), ('\u{a803}', '\u{a805}'), ('\u{a807}', '\u{a80a}'), ('\u{a80c}', '\u{a822}'),
+- ('\u{a840}', '\u{a873}'), ('\u{a882}', '\u{a8b3}'), ('\u{a8f2}', '\u{a8f7}'), ('\u{a8fb}',
+- '\u{a8fb}'), ('\u{a8fd}', '\u{a8fd}'), ('\u{a90a}', '\u{a925}'), ('\u{a930}', '\u{a946}'),
+- ('\u{a960}', '\u{a97c}'), ('\u{a984}', '\u{a9b2}'), ('\u{a9cf}', '\u{a9cf}'), ('\u{a9e0}',
+- '\u{a9e4}'), ('\u{a9e6}', '\u{a9ef}'), ('\u{a9fa}', '\u{a9fe}'), ('\u{aa00}', '\u{aa28}'),
+- ('\u{aa40}', '\u{aa42}'), ('\u{aa44}', '\u{aa4b}'), ('\u{aa60}', '\u{aa76}'), ('\u{aa7a}',
+- '\u{aa7a}'), ('\u{aa7e}', '\u{aaaf}'), ('\u{aab1}', '\u{aab1}'), ('\u{aab5}', '\u{aab6}'),
+- ('\u{aab9}', '\u{aabd}'), ('\u{aac0}', '\u{aac0}'), ('\u{aac2}', '\u{aac2}'), ('\u{aadb}',
+- '\u{aadd}'), ('\u{aae0}', '\u{aaea}'), ('\u{aaf2}', '\u{aaf4}'), ('\u{ab01}', '\u{ab06}'),
+- ('\u{ab09}', '\u{ab0e}'), ('\u{ab11}', '\u{ab16}'), ('\u{ab20}', '\u{ab26}'), ('\u{ab28}',
+- '\u{ab2e}'), ('\u{ab30}', '\u{ab5a}'), ('\u{ab5c}', '\u{ab65}'), ('\u{ab70}', '\u{abe2}'),
+- ('\u{ac00}', '\u{d7a3}'), ('\u{d7b0}', '\u{d7c6}'), ('\u{d7cb}', '\u{d7fb}'), ('\u{f900}',
+- '\u{fa6d}'), ('\u{fa70}', '\u{fad9}'), ('\u{fb00}', '\u{fb06}'), ('\u{fb13}', '\u{fb17}'),
+- ('\u{fb1d}', '\u{fb1d}'), ('\u{fb1f}', '\u{fb28}'), ('\u{fb2a}', '\u{fb36}'), ('\u{fb38}',
+- '\u{fb3c}'), ('\u{fb3e}', '\u{fb3e}'), ('\u{fb40}', '\u{fb41}'), ('\u{fb43}', '\u{fb44}'),
+- ('\u{fb46}', '\u{fbb1}'), ('\u{fbd3}', '\u{fc5d}'), ('\u{fc64}', '\u{fd3d}'), ('\u{fd50}',
+- '\u{fd8f}'), ('\u{fd92}', '\u{fdc7}'), ('\u{fdf0}', '\u{fdf9}'), ('\u{fe71}', '\u{fe71}'),
+- ('\u{fe73}', '\u{fe73}'), ('\u{fe77}', '\u{fe77}'), ('\u{fe79}', '\u{fe79}'), ('\u{fe7b}',
+- '\u{fe7b}'), ('\u{fe7d}', '\u{fe7d}'), ('\u{fe7f}', '\u{fefc}'), ('\u{ff21}', '\u{ff3a}'),
+- ('\u{ff41}', '\u{ff5a}'), ('\u{ff66}', '\u{ff9d}'), ('\u{ffa0}', '\u{ffbe}'), ('\u{ffc2}',
+- '\u{ffc7}'), ('\u{ffca}', '\u{ffcf}'), ('\u{ffd2}', '\u{ffd7}'), ('\u{ffda}', '\u{ffdc}'),
+- ('\u{10000}', '\u{1000b}'), ('\u{1000d}', '\u{10026}'), ('\u{10028}', '\u{1003a}'),
+- ('\u{1003c}', '\u{1003d}'), ('\u{1003f}', '\u{1004d}'), ('\u{10050}', '\u{1005d}'),
+- ('\u{10080}', '\u{100fa}'), ('\u{10140}', '\u{10174}'), ('\u{10280}', '\u{1029c}'),
+- ('\u{102a0}', '\u{102d0}'), ('\u{10300}', '\u{1031f}'), ('\u{10330}', '\u{1034a}'),
+- ('\u{10350}', '\u{10375}'), ('\u{10380}', '\u{1039d}'), ('\u{103a0}', '\u{103c3}'),
+- ('\u{103c8}', '\u{103cf}'), ('\u{103d1}', '\u{103d5}'), ('\u{10400}', '\u{1049d}'),
+- ('\u{104b0}', '\u{104d3}'), ('\u{104d8}', '\u{104fb}'), ('\u{10500}', '\u{10527}'),
+- ('\u{10530}', '\u{10563}'), ('\u{10600}', '\u{10736}'), ('\u{10740}', '\u{10755}'),
+- ('\u{10760}', '\u{10767}'), ('\u{10800}', '\u{10805}'), ('\u{10808}', '\u{10808}'),
+- ('\u{1080a}', '\u{10835}'), ('\u{10837}', '\u{10838}'), ('\u{1083c}', '\u{1083c}'),
+- ('\u{1083f}', '\u{10855}'), ('\u{10860}', '\u{10876}'), ('\u{10880}', '\u{1089e}'),
+- ('\u{108e0}', '\u{108f2}'), ('\u{108f4}', '\u{108f5}'), ('\u{10900}', '\u{10915}'),
+- ('\u{10920}', '\u{10939}'), ('\u{10980}', '\u{109b7}'), ('\u{109be}', '\u{109bf}'),
+- ('\u{10a00}', '\u{10a00}'), ('\u{10a10}', '\u{10a13}'), ('\u{10a15}', '\u{10a17}'),
+- ('\u{10a19}', '\u{10a33}'), ('\u{10a60}', '\u{10a7c}'), ('\u{10a80}', '\u{10a9c}'),
+- ('\u{10ac0}', '\u{10ac7}'), ('\u{10ac9}', '\u{10ae4}'), ('\u{10b00}', '\u{10b35}'),
+- ('\u{10b40}', '\u{10b55}'), ('\u{10b60}', '\u{10b72}'), ('\u{10b80}', '\u{10b91}'),
+- ('\u{10c00}', '\u{10c48}'), ('\u{10c80}', '\u{10cb2}'), ('\u{10cc0}', '\u{10cf2}'),
+- ('\u{11003}', '\u{11037}'), ('\u{11083}', '\u{110af}'), ('\u{110d0}', '\u{110e8}'),
+- ('\u{11103}', '\u{11126}'), ('\u{11150}', '\u{11172}'), ('\u{11176}', '\u{11176}'),
+- ('\u{11183}', '\u{111b2}'), ('\u{111c1}', '\u{111c4}'), ('\u{111da}', '\u{111da}'),
+- ('\u{111dc}', '\u{111dc}'), ('\u{11200}', '\u{11211}'), ('\u{11213}', '\u{1122b}'),
+- ('\u{11280}', '\u{11286}'), ('\u{11288}', '\u{11288}'), ('\u{1128a}', '\u{1128d}'),
+- ('\u{1128f}', '\u{1129d}'), ('\u{1129f}', '\u{112a8}'), ('\u{112b0}', '\u{112de}'),
+- ('\u{11305}', '\u{1130c}'), ('\u{1130f}', '\u{11310}'), ('\u{11313}', '\u{11328}'),
+- ('\u{1132a}', '\u{11330}'), ('\u{11332}', '\u{11333}'), ('\u{11335}', '\u{11339}'),
+- ('\u{1133d}', '\u{1133d}'), ('\u{11350}', '\u{11350}'), ('\u{1135d}', '\u{11361}'),
+- ('\u{11400}', '\u{11434}'), ('\u{11447}', '\u{1144a}'), ('\u{11480}', '\u{114af}'),
++ ('\u{11332}', '\u{11333}'), ('\u{11335}', '\u{11339}'), ('\u{1133d}', '\u{1133d}'),
++ ('\u{11350}', '\u{11350}'), ('\u{1135d}', '\u{11361}'), ('\u{11400}', '\u{11434}'),
++ ('\u{11447}', '\u{1144a}'), ('\u{1145f}', '\u{1145f}'), ('\u{11480}', '\u{114af}'),
+ ('\u{114c4}', '\u{114c5}'), ('\u{114c7}', '\u{114c7}'), ('\u{11580}', '\u{115ae}'),
+ ('\u{115d8}', '\u{115db}'), ('\u{11600}', '\u{1162f}'), ('\u{11644}', '\u{11644}'),
+- ('\u{11680}', '\u{116aa}'), ('\u{11700}', '\u{11719}'), ('\u{118a0}', '\u{118df}'),
+- ('\u{118ff}', '\u{118ff}'), ('\u{11ac0}', '\u{11af8}'), ('\u{11c00}', '\u{11c08}'),
++ ('\u{11680}', '\u{116aa}'), ('\u{116b8}', '\u{116b8}'), ('\u{11700}', '\u{1171a}'),
++ ('\u{11800}', '\u{1182b}'), ('\u{118a0}', '\u{118df}'), ('\u{118ff}', '\u{118ff}'),
++ ('\u{119a0}', '\u{119a7}'), ('\u{119aa}', '\u{119d0}'), ('\u{119e1}', '\u{119e1}'),
++ ('\u{119e3}', '\u{119e3}'), ('\u{11a00}', '\u{11a00}'), ('\u{11a0b}', '\u{11a32}'),
++ ('\u{11a3a}', '\u{11a3a}'), ('\u{11a50}', '\u{11a50}'), ('\u{11a5c}', '\u{11a89}'),
++ ('\u{11a9d}', '\u{11a9d}'), ('\u{11ac0}', '\u{11af8}'), ('\u{11c00}', '\u{11c08}'),
+ ('\u{11c0a}', '\u{11c2e}'), ('\u{11c40}', '\u{11c40}'), ('\u{11c72}', '\u{11c8f}'),
++ ('\u{11d00}', '\u{11d06}'), ('\u{11d08}', '\u{11d09}'), ('\u{11d0b}', '\u{11d30}'),
++ ('\u{11d46}', '\u{11d46}'), ('\u{11d60}', '\u{11d65}'), ('\u{11d67}', '\u{11d68}'),
++ ('\u{11d6a}', '\u{11d89}'), ('\u{11d98}', '\u{11d98}'), ('\u{11ee0}', '\u{11ef2}'),
+ ('\u{12000}', '\u{12399}'), ('\u{12400}', '\u{1246e}'), ('\u{12480}', '\u{12543}'),
+ ('\u{13000}', '\u{1342e}'), ('\u{14400}', '\u{14646}'), ('\u{16800}', '\u{16a38}'),
+ ('\u{16a40}', '\u{16a5e}'), ('\u{16ad0}', '\u{16aed}'), ('\u{16b00}', '\u{16b2f}'),
+ ('\u{16b40}', '\u{16b43}'), ('\u{16b63}', '\u{16b77}'), ('\u{16b7d}', '\u{16b8f}'),
+- ('\u{16f00}', '\u{16f44}'), ('\u{16f50}', '\u{16f50}'), ('\u{16f93}', '\u{16f9f}'),
+- ('\u{16fe0}', '\u{16fe0}'), ('\u{17000}', '\u{187ec}'), ('\u{18800}', '\u{18af2}'),
+- ('\u{1b000}', '\u{1b001}'), ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'),
+- ('\u{1bc80}', '\u{1bc88}'), ('\u{1bc90}', '\u{1bc99}'), ('\u{1d400}', '\u{1d454}'),
+- ('\u{1d456}', '\u{1d49c}'), ('\u{1d49e}', '\u{1d49f}'), ('\u{1d4a2}', '\u{1d4a2}'),
+- ('\u{1d4a5}', '\u{1d4a6}'), ('\u{1d4a9}', '\u{1d4ac}'), ('\u{1d4ae}', '\u{1d4b9}'),
+- ('\u{1d4bb}', '\u{1d4bb}'), ('\u{1d4bd}', '\u{1d4c3}'), ('\u{1d4c5}', '\u{1d505}'),
+- ('\u{1d507}', '\u{1d50a}'), ('\u{1d50d}', '\u{1d514}'), ('\u{1d516}', '\u{1d51c}'),
+- ('\u{1d51e}', '\u{1d539}'), ('\u{1d53b}', '\u{1d53e}'), ('\u{1d540}', '\u{1d544}'),
+- ('\u{1d546}', '\u{1d546}'), ('\u{1d54a}', '\u{1d550}'), ('\u{1d552}', '\u{1d6a5}'),
+- ('\u{1d6a8}', '\u{1d6c0}'), ('\u{1d6c2}', '\u{1d6da}'), ('\u{1d6dc}', '\u{1d6fa}'),
+- ('\u{1d6fc}', '\u{1d714}'), ('\u{1d716}', '\u{1d734}'), ('\u{1d736}', '\u{1d74e}'),
+- ('\u{1d750}', '\u{1d76e}'), ('\u{1d770}', '\u{1d788}'), ('\u{1d78a}', '\u{1d7a8}'),
+- ('\u{1d7aa}', '\u{1d7c2}'), ('\u{1d7c4}', '\u{1d7cb}'), ('\u{1e800}', '\u{1e8c4}'),
+- ('\u{1e900}', '\u{1e943}'), ('\u{1ee00}', '\u{1ee03}'), ('\u{1ee05}', '\u{1ee1f}'),
+- ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'), ('\u{1ee27}', '\u{1ee27}'),
+- ('\u{1ee29}', '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'), ('\u{1ee39}', '\u{1ee39}'),
+- ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'), ('\u{1ee47}', '\u{1ee47}'),
+- ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'), ('\u{1ee4d}', '\u{1ee4f}'),
+- ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'), ('\u{1ee57}', '\u{1ee57}'),
+- ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'), ('\u{1ee5d}', '\u{1ee5d}'),
+- ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'), ('\u{1ee64}', '\u{1ee64}'),
+- ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'), ('\u{1ee74}', '\u{1ee77}'),
+- ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'), ('\u{1ee80}', '\u{1ee89}'),
+- ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'), ('\u{1eea5}', '\u{1eea9}'),
+- ('\u{1eeab}', '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'), ('\u{2a700}', '\u{2b734}'),
+- ('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'), ('\u{2f800}', '\u{2fa1d}')
++ ('\u{16e40}', '\u{16e7f}'), ('\u{16f00}', '\u{16f4a}'), ('\u{16f50}', '\u{16f50}'),
++ ('\u{16f93}', '\u{16f9f}'), ('\u{16fe0}', '\u{16fe1}'), ('\u{16fe3}', '\u{16fe3}'),
++ ('\u{17000}', '\u{187f7}'), ('\u{18800}', '\u{18af2}'), ('\u{1b000}', '\u{1b11e}'),
++ ('\u{1b150}', '\u{1b152}'), ('\u{1b164}', '\u{1b167}'), ('\u{1b170}', '\u{1b2fb}'),
++ ('\u{1bc00}', '\u{1bc6a}'), ('\u{1bc70}', '\u{1bc7c}'), ('\u{1bc80}', '\u{1bc88}'),
++ ('\u{1bc90}', '\u{1bc99}'), ('\u{1d400}', '\u{1d454}'), ('\u{1d456}', '\u{1d49c}'),
++ ('\u{1d49e}', '\u{1d49f}'), ('\u{1d4a2}', '\u{1d4a2}'), ('\u{1d4a5}', '\u{1d4a6}'),
++ ('\u{1d4a9}', '\u{1d4ac}'), ('\u{1d4ae}', '\u{1d4b9}'), ('\u{1d4bb}', '\u{1d4bb}'),
++ ('\u{1d4bd}', '\u{1d4c3}'), ('\u{1d4c5}', '\u{1d505}'), ('\u{1d507}', '\u{1d50a}'),
++ ('\u{1d50d}', '\u{1d514}'), ('\u{1d516}', '\u{1d51c}'), ('\u{1d51e}', '\u{1d539}'),
++ ('\u{1d53b}', '\u{1d53e}'), ('\u{1d540}', '\u{1d544}'), ('\u{1d546}', '\u{1d546}'),
++ ('\u{1d54a}', '\u{1d550}'), ('\u{1d552}', '\u{1d6a5}'), ('\u{1d6a8}', '\u{1d6c0}'),
++ ('\u{1d6c2}', '\u{1d6da}'), ('\u{1d6dc}', '\u{1d6fa}'), ('\u{1d6fc}', '\u{1d714}'),
++ ('\u{1d716}', '\u{1d734}'), ('\u{1d736}', '\u{1d74e}'), ('\u{1d750}', '\u{1d76e}'),
++ ('\u{1d770}', '\u{1d788}'), ('\u{1d78a}', '\u{1d7a8}'), ('\u{1d7aa}', '\u{1d7c2}'),
++ ('\u{1d7c4}', '\u{1d7cb}'), ('\u{1e100}', '\u{1e12c}'), ('\u{1e137}', '\u{1e13d}'),
++ ('\u{1e14e}', '\u{1e14e}'), ('\u{1e2c0}', '\u{1e2eb}'), ('\u{1e800}', '\u{1e8c4}'),
++ ('\u{1e900}', '\u{1e943}'), ('\u{1e94b}', '\u{1e94b}'), ('\u{1ee00}', '\u{1ee03}'),
++ ('\u{1ee05}', '\u{1ee1f}'), ('\u{1ee21}', '\u{1ee22}'), ('\u{1ee24}', '\u{1ee24}'),
++ ('\u{1ee27}', '\u{1ee27}'), ('\u{1ee29}', '\u{1ee32}'), ('\u{1ee34}', '\u{1ee37}'),
++ ('\u{1ee39}', '\u{1ee39}'), ('\u{1ee3b}', '\u{1ee3b}'), ('\u{1ee42}', '\u{1ee42}'),
++ ('\u{1ee47}', '\u{1ee47}'), ('\u{1ee49}', '\u{1ee49}'), ('\u{1ee4b}', '\u{1ee4b}'),
++ ('\u{1ee4d}', '\u{1ee4f}'), ('\u{1ee51}', '\u{1ee52}'), ('\u{1ee54}', '\u{1ee54}'),
++ ('\u{1ee57}', '\u{1ee57}'), ('\u{1ee59}', '\u{1ee59}'), ('\u{1ee5b}', '\u{1ee5b}'),
++ ('\u{1ee5d}', '\u{1ee5d}'), ('\u{1ee5f}', '\u{1ee5f}'), ('\u{1ee61}', '\u{1ee62}'),
++ ('\u{1ee64}', '\u{1ee64}'), ('\u{1ee67}', '\u{1ee6a}'), ('\u{1ee6c}', '\u{1ee72}'),
++ ('\u{1ee74}', '\u{1ee77}'), ('\u{1ee79}', '\u{1ee7c}'), ('\u{1ee7e}', '\u{1ee7e}'),
++ ('\u{1ee80}', '\u{1ee89}'), ('\u{1ee8b}', '\u{1ee9b}'), ('\u{1eea1}', '\u{1eea3}'),
++ ('\u{1eea5}', '\u{1eea9}'), ('\u{1eeab}', '\u{1eebb}'), ('\u{20000}', '\u{2a6d6}'),
++ ('\u{2a700}', '\u{2b734}'), ('\u{2b740}', '\u{2b81d}'), ('\u{2b820}', '\u{2cea1}'),
++ ('\u{2ceb0}', '\u{2ebe0}'), ('\u{2f800}', '\u{2fa1d}')
+ ];
+
+ pub fn XID_Start(c: char) -> bool {
+diff --git a/third_party/rust/unicode-xid/src/tests.rs b/third_party/rust/unicode-xid/src/tests.rs
+--- a/third_party/rust/unicode-xid/src/tests.rs
++++ b/third_party/rust/unicode-xid/src/tests.rs
+@@ -15,8 +15,6 @@
+ #[cfg(feature = "bench")]
+ use std::prelude::v1::*;
+
+-use super::UnicodeXID;
+-
+ #[cfg(feature = "bench")]
+ #[bench]
+ fn cargo_is_xid_start(b: &mut Bencher) {
+@@ -24,7 +22,7 @@
+
+ b.bytes = string.len() as u64;
+ b.iter(|| {
+- string.chars().all(UnicodeXID::is_xid_start)
++ string.chars().all(super::UnicodeXID::is_xid_start)
+ });
+ }
+
+@@ -46,7 +44,7 @@
+
+ b.bytes = string.len() as u64;
+ b.iter(|| {
+- string.chars().all(UnicodeXID::is_xid_continue)
++ string.chars().all(super::UnicodeXID::is_xid_continue)
+ });
+ }
+
+@@ -69,7 +67,7 @@
+ ];
+
+ for ch in &chars {
+- assert!(UnicodeXID::is_xid_start(*ch), "{}", ch);
++ assert!(super::UnicodeXID::is_xid_start(*ch), "{}", ch);
+ }
+ }
+
+@@ -83,7 +81,7 @@
+ ];
+
+ for ch in &chars {
+- assert!(!UnicodeXID::is_xid_start(*ch), "{}", ch);
++ assert!(!super::UnicodeXID::is_xid_start(*ch), "{}", ch);
+ }
+ }
+
+@@ -95,7 +93,7 @@
+ ];
+
+ for ch in &chars {
+- assert!(UnicodeXID::is_xid_continue(*ch), "{}", ch);
++ assert!(super::UnicodeXID::is_xid_continue(*ch), "{}", ch);
+ }
+ }
+
+@@ -108,6 +106,6 @@
+ ];
+
+ for &ch in &chars {
+- assert!(!UnicodeXID::is_xid_continue(ch), "{}", ch);
++ assert!(!super::UnicodeXID::is_xid_continue(ch), "{}", ch);
+ }
+ }
+diff --git a/third_party/rust/which/.cargo-checksum.json b/third_party/rust/which/.cargo-checksum.json
+deleted file mode 100644
+--- a/third_party/rust/which/.cargo-checksum.json
++++ /dev/null
+@@ -1 +0,0 @@
+-{"files":{"Cargo.toml":"72267e6b8f7e153fc8adb396837ffd1fd46bf3c5ec51908f830ac92327d812e5","LICENSE.txt":"0041560f5d419c30e1594567f3b7ac2bc078ff6a68f437e0348ba85d9cf99112","README.md":"b300f303f88ca776a8f5f38050ca7c25ce9cc866dcb4a69e414aa1d45c6bed14","src/lib.rs":"4ead66ddef1b6824a656ff1be692b74dc099aae944fbb677352ad2a0c78782d3"},"package":"4be6cfa54dab45266e98b5d7be2f8ce959ddd49abd141a05d52dce4b07f803bb"}
+\ No newline at end of file
+diff --git a/third_party/rust/which/Cargo.toml b/third_party/rust/which/Cargo.toml
+deleted file mode 100644
+--- a/third_party/rust/which/Cargo.toml
++++ /dev/null
+@@ -1,26 +0,0 @@
+-# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+-#
+-# When uploading crates to the registry Cargo will automatically
+-# "normalize" Cargo.toml files for maximal compatibility
+-# with all versions of Cargo and also rewrite `path` dependencies
+-# to registry (e.g. crates.io) dependencies
+-#
+-# If you believe there's an error in this file please file an
+-# issue against the rust-lang/cargo repository. If you're
+-# editing this file be aware that the upstream Cargo.toml
+-# will likely look very different (and much more reasonable)
+-
+-[package]
+-name = "which"
+-version = "1.0.3"
+-authors = ["fangyuanziti <tiziyuanfang@gmail.com>"]
+-description = "A Rust equivalent of Unix command \"which\". Locate installed execuable in cross platforms."
+-readme = "README.md"
+-keywords = ["which", "which-rs", "unix", "command"]
+-categories = ["os", "filesystem"]
+-license = "MIT"
+-repository = "https://github.com/fangyuanziti/which-rs.git"
+-[dependencies.libc]
+-version = "0.2.10"
+-[dev-dependencies.tempdir]
+-version = "0.3.4"
+diff --git a/third_party/rust/which/LICENSE.txt b/third_party/rust/which/LICENSE.txt
+deleted file mode 100644
+--- a/third_party/rust/which/LICENSE.txt
++++ /dev/null
+@@ -1,19 +0,0 @@
+-Copyright (c) 2015 fangyuanziti
+-
+-Permission is hereby granted, free of charge, to any person obtaining a copy
+-of this software and associated documentation files (the "Software"), to deal
+-in the Software without restriction, including without limitation the rights
+-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+-copies of the Software, and to permit persons to whom the Software is
+-furnished to do so, subject to the following conditions:
+-
+-The above copyright notice and this permission notice shall be included in
+-all copies or substantial portions of the Software.
+-
+-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+-THE SOFTWARE.
+diff --git a/third_party/rust/which/README.md b/third_party/rust/which/README.md
+deleted file mode 100644
+--- a/third_party/rust/which/README.md
++++ /dev/null
+@@ -1,27 +0,0 @@
+-[![Build Status](https://travis-ci.org/fangyuanziti/which-rs.svg?branch=master)](https://travis-ci.org/fangyuanziti/which-rs)
+-
+-# which
+-
+-A Rust equivalent of Unix command "which". Locate installed execuable in cross platforms.
+-
+-## Support platforms
+-
+-* Linux
+-* Windows
+-* macOS
+-
+-## Example
+-
+-To find which rustc exectable binary is using. Locate installed execuable in cross platforms.
+-
+-``` rust
+-use which::which;
+-
+-let result = which::which("rustc").unwrap();
+-assert_eq!(result, PathBuf::from("/usr/bin/rustc"));
+-
+-```
+-
+-## Documentation
+-
+-The documentation is [available online](https://docs.rs/which/).
+diff --git a/third_party/rust/which/src/lib.rs b/third_party/rust/which/src/lib.rs
+deleted file mode 100644
+--- a/third_party/rust/which/src/lib.rs
++++ /dev/null
+@@ -1,445 +0,0 @@
+-//! which
+-//!
+-//! A Rust equivalent of Unix command `which(1)`.
+-//! # Example:
+-//!
+-//! To find which rustc executable binary is using:
+-//!
+-//! ``` norun
+-//! use which::which;
+-//!
+-//! let result = which::which("rustc").unwrap();
+-//! assert_eq!(result, PathBuf::from("/usr/bin/rustc"));
+-//!
+-//! ```
+-
+-extern crate libc;
+-#[cfg(test)]
+-extern crate tempdir;
+-
+-use std::ascii::AsciiExt;
+-use std::path::{Path,PathBuf};
+-use std::{env, fs};
+-#[cfg(unix)]
+-use std::ffi::CString;
+-use std::ffi::OsStr;
+-#[cfg(unix)]
+-use std::os::unix::ffi::OsStrExt;
+-
+-/// Like `Path::with_extension`, but don't replace an existing extension.
+-fn ensure_exe_extension<T: AsRef<Path>>(path: T) -> PathBuf {
+- if env::consts::EXE_EXTENSION.is_empty() {
+- // Nothing to do.
+- path.as_ref().to_path_buf()
+- } else {
+- match path.as_ref().extension().and_then(|e| e.to_str()).map(|e| e.eq_ignore_ascii_case(env::consts::EXE_EXTENSION)) {
+- // Already has the right extension.
+- Some(true) => path.as_ref().to_path_buf(),
+- _ => {
+- // Append the extension.
+- let mut s = path.as_ref().to_path_buf().into_os_string();
+- s.push(".");
+- s.push(env::consts::EXE_EXTENSION);
+- PathBuf::from(s)
+- }
+- }
+- }
+-}
+-
+-
+-/// Find a exectable binary's path by name.
+-///
+-/// If given an absolute path, returns it if the file exists and is executable.
+-///
+-/// If given a relative path, returns an absolute path to the file if
+-/// it exists and is executable.
+-///
+-/// If given a string without path separators, looks for a file named
+-/// `binary_name` at each directory in `$PATH` and if it finds an executable
+-/// file there, returns it.
+-///
+-/// # Example
+-///
+-/// ``` norun
+-/// use which::which;
+-/// use std::path::PathBuf;
+-///
+-/// let result = which::which("rustc").unwrap();
+-/// assert_eq!(result, PathBuf::from("/usr/bin/rustc"));
+-///
+-/// ```
+-pub fn which<T: AsRef<OsStr>>(binary_name: T)
+- -> Result<PathBuf, &'static str> {
+- env::current_dir()
+- .or_else(|_| Err("Couldn't get current directory"))
+- .and_then(|cwd| which_in(binary_name, env::var_os("PATH"), &cwd))
+-}
+-
+-/// Find `binary_name` in the path list `paths`, using `cwd` to resolve relative paths.
+-pub fn which_in<T, U, V>(binary_name: T, paths: Option<U>, cwd: V)
+- -> Result<PathBuf, &'static str>
+- where T: AsRef<OsStr>,
+- U: AsRef<OsStr>,
+- V: AsRef<Path> {
+- let binary_checker = CompositeChecker::new()
+- .add_checker(Box::new(ExistedChecker::new()))
+- .add_checker(Box::new(ExecutableChecker::new()));
+-
+- let finder = Finder::new();
+-
+- finder.find(binary_name, paths, cwd, &binary_checker)
+-}
+-
+-struct Finder;
+-
+-impl Finder {
+- fn new() -> Finder {
+- Finder
+- }
+-
+- fn find<T, U, V>(&self, binary_name: T, paths: Option<U>, cwd: V,
+- binary_checker: &Checker)
+- -> Result<PathBuf, &'static str>
+- where T: AsRef<OsStr>,
+- U: AsRef<OsStr>,
+- V: AsRef<Path> {
+-
+- let path = ensure_exe_extension(binary_name.as_ref());
+-
+- // Does it have a path separator?
+- if path.components().count() > 1 {
+- if path.is_absolute() {
+- if binary_checker.is_valid(&path) {
+- // Already fine.
+- Ok(path)
+- } else {
+- // Absolute path but it's not usable.
+- Err("Bad absolute path")
+- }
+- } else {
+- // Try to make it absolute.
+- let mut new_path = PathBuf::from(cwd.as_ref());
+- new_path.push(path);
+- let new_path = ensure_exe_extension(new_path);
+- if binary_checker.is_valid(&new_path) {
+- Ok(new_path)
+- } else {
+- // File doesn't exist or isn't executable.
+- Err("Bad relative path")
+- }
+- }
+- } else {
+- // No separator, look it up in `paths`.
+- paths.and_then(
+- |paths|
+- env::split_paths(paths.as_ref())
+- .map(|p| ensure_exe_extension(p.join(binary_name.as_ref())))
+- .skip_while(|p| !(binary_checker.is_valid(&p)))
+- .next())
+- .ok_or("Cannot find binary path")
+- }
+- }
+-}
+-
+-
+-trait Checker {
+- fn is_valid(&self, path: &Path) -> bool;
+-}
+-
+-struct ExecutableChecker;
+-
+-impl ExecutableChecker {
+- fn new() -> ExecutableChecker {
+- ExecutableChecker
+- }
+-}
+-
+-impl Checker for ExecutableChecker {
+- #[cfg(unix)]
+- fn is_valid(&self, path: &Path) -> bool {
+- CString::new(path.as_os_str().as_bytes())
+- .and_then(|c| {
+- Ok(unsafe { libc::access(c.as_ptr(), libc::X_OK) == 0 })
+- })
+- .unwrap_or(false)
+- }
+-
+- #[cfg(not(unix))]
+- fn is_valid(&self, _path: &Path) -> bool { true }
+-}
+-
+-struct ExistedChecker;
+-
+-impl ExistedChecker {
+- fn new() -> ExistedChecker {
+- ExistedChecker
+- }
+-}
+-
+-impl Checker for ExistedChecker {
+- fn is_valid(&self, path: &Path) -> bool {
+- fs::metadata(path).map(|metadata|{
+- metadata.is_file()
+- }).unwrap_or(false)
+- }
+-}
+-
+-struct CompositeChecker {
+- checkers: Vec<Box<Checker>>
+-}
+-
+-impl CompositeChecker {
+- fn new() -> CompositeChecker {
+- CompositeChecker {
+- checkers: Vec::new()
+- }
+- }
+-
+- fn add_checker(mut self, checker: Box<Checker>) -> CompositeChecker {
+- self.checkers.push(checker);
+- self
+- }
+-}
+-
+-impl Checker for CompositeChecker {
+- fn is_valid(&self, path: &Path) -> bool {
+- self.checkers.iter()
+- .all(|checker| checker.is_valid(path))
+- }
+-}
+-
+-#[test]
+-fn test_exe_extension() {
+- let expected = PathBuf::from("foo").with_extension(env::consts::EXE_EXTENSION);
+- assert_eq!(expected, ensure_exe_extension(PathBuf::from("foo")));
+- let p = expected.clone();
+- assert_eq!(expected, ensure_exe_extension(p));
+-}
+-
+-#[test]
+-#[cfg(windows)]
+-fn test_exe_extension_existing_extension() {
+- assert_eq!(PathBuf::from("foo.bar.exe"),
+- ensure_exe_extension("foo.bar"));
+-}
+-
+-#[test]
+-#[cfg(windows)]
+-fn test_exe_extension_existing_extension_uppercase() {
+- assert_eq!(PathBuf::from("foo.EXE"),
+- ensure_exe_extension("foo.EXE"));
+-}
+-
+-#[cfg(test)]
+-mod test {
+- use super::*;
+-
+- use std::env;
+- use std::ffi::{OsStr,OsString};
+- use std::fs;
+- use std::io;
+- use std::path::{Path,PathBuf};
+- use tempdir::TempDir;
+-
+- struct TestFixture {
+- /// Temp directory.
+- pub tempdir: TempDir,
+- /// $PATH
+- pub paths: OsString,
+- /// Binaries created in $PATH
+- pub bins: Vec<PathBuf>,
+- }
+-
+- const SUBDIRS: &'static [&'static str] = &["a", "b", "c"];
+- const BIN_NAME: &'static str = "bin";
+-
+- #[cfg(unix)]
+- fn mk_bin(dir: &Path, path: &str) -> io::Result<PathBuf> {
+- use libc;
+- use std::os::unix::fs::OpenOptionsExt;
+- let bin = dir.join(path).with_extension(env::consts::EXE_EXTENSION);
+- fs::OpenOptions::new()
+- .write(true)
+- .create(true)
+- .mode(0o666 | (libc::S_IXUSR as u32))
+- .open(&bin)
+- .and_then(|_f| bin.canonicalize())
+- }
+-
+- fn touch(dir: &Path, path: &str) -> io::Result<PathBuf> {
+- let b = dir.join(path).with_extension(env::consts::EXE_EXTENSION);
+- fs::File::create(&b)
+- .and_then(|_f| b.canonicalize())
+- }
+-
+- #[cfg(not(unix))]
+- fn mk_bin(dir: &Path, path: &str) -> io::Result<PathBuf> {
+- touch(dir, path)
+- }
+-
+- impl TestFixture {
+- pub fn new() -> TestFixture {
+- let tempdir = TempDir::new("which_tests").unwrap();
+- let mut builder = fs::DirBuilder::new();
+- builder.recursive(true);
+- let mut paths = vec!();
+- let mut bins = vec!();
+- for d in SUBDIRS.iter() {
+- let p = tempdir.path().join(d);
+- builder.create(&p).unwrap();
+- bins.push(mk_bin(&p, &BIN_NAME).unwrap());
+- paths.push(p);
+- }
+- TestFixture {
+- tempdir: tempdir,
+- paths: env::join_paths(paths).unwrap(),
+- bins: bins,
+- }
+- }
+-
+- #[allow(dead_code)]
+- pub fn touch(&self, path: &str) -> io::Result<PathBuf> {
+- touch(self.tempdir.path(), &path)
+- }
+-
+- pub fn mk_bin(&self, path: &str) -> io::Result<PathBuf> {
+- mk_bin(self.tempdir.path(), &path)
+- }
+- }
+-
+- fn _which<T: AsRef<OsStr>>(f: &TestFixture, path: T) -> Result<PathBuf, &'static str> {
+- which_in(path, Some(f.paths.clone()), f.tempdir.path())
+- }
+-
+- #[test]
+- #[cfg(unix)]
+- fn it_works() {
+- use std::process::Command;
+- let result = which("rustc");
+- assert!(result.is_ok());
+-
+- let which_result = Command::new("which")
+- .arg("rustc")
+- .output();
+-
+- assert_eq!(String::from(result.unwrap().to_str().unwrap()),
+- String::from_utf8(which_result.unwrap().stdout).unwrap().trim());
+- }
+-
+- #[test]
+- fn test_which() {
+- let f = TestFixture::new();
+- assert_eq!(_which(&f, &BIN_NAME).unwrap().canonicalize().unwrap(),
+- f.bins[0])
+- }
+-
+- #[test]
+- fn test_which_extension() {
+- let f = TestFixture::new();
+- let b = Path::new(&BIN_NAME).with_extension(env::consts::EXE_EXTENSION);
+- assert_eq!(_which(&f, &b).unwrap().canonicalize().unwrap(),
+- f.bins[0])
+- }
+-
+- #[test]
+- fn test_which_not_found() {
+- let f = TestFixture::new();
+- assert!(_which(&f, "a").is_err());
+- }
+-
+- #[test]
+- fn test_which_second() {
+- let f = TestFixture::new();
+- let b = f.mk_bin("b/another").unwrap();
+- assert_eq!(_which(&f, "another").unwrap().canonicalize().unwrap(), b);
+- }
+-
+- #[test]
+- fn test_which_absolute() {
+- let f = TestFixture::new();
+- assert_eq!(_which(&f, &f.bins[1]).unwrap().canonicalize().unwrap(),
+- f.bins[1].canonicalize().unwrap());
+- }
+-
+- #[test]
+- #[cfg(windows)]
+- fn test_which_absolute_path_case() {
+- // Test that an absolute path with an uppercase extension
+- // is accepted.
+- let f = TestFixture::new();
+- let p = f.bins[1].with_extension("EXE");
+- assert_eq!(_which(&f, &p).unwrap().canonicalize().unwrap(),
+- f.bins[1].canonicalize().unwrap());
+- }
+-
+- #[test]
+- fn test_which_absolute_extension() {
+- let f = TestFixture::new();
+- // Don't append EXE_EXTENSION here.
+- let b = f.bins[1].parent().unwrap().join(&BIN_NAME);
+- assert_eq!(_which(&f, &b).unwrap().canonicalize().unwrap(),
+- f.bins[1].canonicalize().unwrap());
+- }
+-
+- #[test]
+- fn test_which_relative() {
+- let f = TestFixture::new();
+- assert_eq!(_which(&f, "b/bin").unwrap().canonicalize().unwrap(),
+- f.bins[1].canonicalize().unwrap());
+- }
+-
+- #[test]
+- fn test_which_relative_extension() {
+- // test_which_relative tests a relative path without an extension,
+- // so test a relative path with an extension here.
+- let f = TestFixture::new();
+- let b = Path::new("b/bin").with_extension(env::consts::EXE_EXTENSION);
+- assert_eq!(_which(&f, &b).unwrap().canonicalize().unwrap(),
+- f.bins[1].canonicalize().unwrap());
+- }
+-
+- #[test]
+- #[cfg(windows)]
+- fn test_which_relative_extension_case() {
+- // Test that a relative path with an uppercase extension
+- // is accepted.
+- let f = TestFixture::new();
+- let b = Path::new("b/bin").with_extension("EXE");
+- assert_eq!(_which(&f, &b).unwrap().canonicalize().unwrap(),
+- f.bins[1].canonicalize().unwrap());
+- }
+-
+- #[test]
+- fn test_which_relative_leading_dot() {
+- let f = TestFixture::new();
+- assert_eq!(_which(&f, "./b/bin").unwrap().canonicalize().unwrap(),
+- f.bins[1].canonicalize().unwrap());
+- }
+-
+- #[test]
+- #[cfg(unix)]
+- fn test_which_non_executable() {
+- // Shouldn't return non-executable files.
+- let f = TestFixture::new();
+- f.touch("b/another").unwrap();
+- assert!(_which(&f, "another").is_err());
+- }
+-
+- #[test]
+- #[cfg(unix)]
+- fn test_which_absolute_non_executable() {
+- // Shouldn't return non-executable files, even if given an absolute path.
+- let f = TestFixture::new();
+- let b = f.touch("b/another").unwrap();
+- assert!(_which(&f, &b).is_err());
+- }
+-
+- #[test]
+- #[cfg(unix)]
+- fn test_which_relative_non_executable() {
+- // Shouldn't return non-executable files.
+- let f = TestFixture::new();
+- f.touch("b/another").unwrap();
+- assert!(_which(&f, "b/another").is_err());
+- }
+-}
+
diff --git a/libre/iceweasel/searchengines.patch b/libre/iceweasel/libre-searchengines.patch
index e42ca78e8..e42ca78e8 100644
--- a/libre/iceweasel/searchengines.patch
+++ b/libre/iceweasel/libre-searchengines.patch
diff --git a/libre/iceweasel/no-relinking.patch b/libre/iceweasel/no-relinking.patch
new file mode 100644
index 000000000..6dd988402
--- /dev/null
+++ b/libre/iceweasel/no-relinking.patch
@@ -0,0 +1,41 @@
+
+# HG changeset patch
+# User Mike Shal <mshal@mozilla.com>
+# Date 1570127498 25200
+# Node ID b8bc2504f108d8a2216ee11405cbbe4cf7a0eaec
+# Parent 9a4d6aacc48080f019024c02ac7da1fd576b39fe
+Bug XYZ - Only force re-linking on 1-tier PGO builds; r?#firefox-build-system-reviewers
+
+
+diff --git a/config/rules.mk b/config/rules.mk
+--- a/config/rules.mk
++++ b/config/rules.mk
+@@ -465,25 +465,27 @@ ifeq ($(OS_ARCH)_$(GNU_CC), WINNT_)
+ $(foreach pgd,$(wildcard *.pgd),pgomgr -clear $(pgd);)
+ else
+ ifdef GNU_CC
+ -$(RM) *.gcda
+ endif
+ endif
+ endif
+
++ifdef MOZ_1TIER_PGO
+ ifneq (,$(MOZ_PROFILE_GENERATE)$(MOZ_PROFILE_USE))
+ ifneq (,$(filter target,$(MAKECMDGOALS)))
+ ifdef GNU_CC
+ # Force rebuilding libraries and programs in both passes because each
+ # pass uses different object files.
+ $(PROGRAM) $(SHARED_LIBRARY) $(LIBRARY): FORCE
+ endif
+ endif
+ endif
++endif
+
+ endif # NO_PROFILE_GUIDED_OPTIMIZE
+
+ ##############################################
+
+ clean clobber realclean clobber_all::
+ -$(RM) $(ALL_TRASH)
+ -$(RM) -r $(ALL_TRASH_DIRS)
+